import torch
import torch.nn as nn
from torch.nn import functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

class SykoConfig(PretrainedConfig):
    """Configuration for the Syko GPT-style causal language model."""
    model_type = "syko"
    
    def __init__(
        self,
        vocab_size=4096,
        n_embd=384,      # increased (was 256)
        n_layer=8,       # increased (was 6)
        n_head=6,        # set so that head_size = 384 / 6 = 64
        block_size=256,  # increased (was 64) -> longer context memory
        dropout=0.2,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.block_size = block_size
        self.dropout = dropout
        
        # Standard HF attribute aliases so generic tooling can read the model dimensions
        self.num_hidden_layers = n_layer
        self.hidden_size = n_embd
        self.num_attention_heads = n_head
        
        super().__init__(**kwargs)

class Head(nn.Module):
    """One head of causal (masked) self-attention."""
    def __init__(self, n_embd, head_size, block_size, dropout):
        super().__init__()
        self.key = nn.Linear(n_embd, head_size, bias=False)
        self.query = nn.Linear(n_embd, head_size, bias=False)
        self.value = nn.Linear(n_embd, head_size, bias=False)
        # Lower-triangular causal mask kept as a buffer (moves with the module, not trained)
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        B, T, C = x.shape
        k = self.key(x)
        q = self.query(x)
        # Scale by head_size (k's last dim), not the full embedding width C
        wei = q @ k.transpose(-2, -1) * (k.shape[-1] ** -0.5)
        # Slice the causal mask dynamically to the incoming sequence length T
        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))
        wei = F.softmax(wei, dim=-1)
        wei = self.dropout(wei)
        v = self.value(x)
        out = wei @ v
        return out

class MultiHeadAttention(nn.Module):
    """Several attention heads in parallel; outputs are concatenated and projected back to n_embd."""
    def __init__(self, n_head, head_size, n_embd, block_size, dropout):
        super().__init__()
        self.heads = nn.ModuleList([Head(n_embd, head_size, block_size, dropout) for _ in range(n_head)])
        self.proj = nn.Linear(n_embd, n_embd)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = torch.cat([h(x) for h in self.heads], dim=-1)
        out = self.dropout(self.proj(out))
        return out

class FeedForward(nn.Module):
    """Position-wise MLP: n_embd -> 4*n_embd -> n_embd with GELU and dropout."""
    def __init__(self, n_embd, dropout):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_embd, 4 * n_embd),
            nn.GELU(),
            nn.Linear(4 * n_embd, n_embd),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)

class Block(nn.Module):
    """Transformer block with pre-LayerNorm residual attention and feed-forward sublayers."""
    def __init__(self, n_embd, n_head, block_size, dropout):
        super().__init__()
        head_size = n_embd // n_head
        self.sa = MultiHeadAttention(n_head, head_size, n_embd, block_size, dropout)
        self.ffwd = FeedForward(n_embd, dropout)
        self.ln1 = nn.LayerNorm(n_embd)
        self.ln2 = nn.LayerNorm(n_embd)

    def forward(self, x):
        x = x + self.sa(self.ln1(x))
        x = x + self.ffwd(self.ln2(x))
        return x

class SykoForCausalLM(PreTrainedModel):
    """Minimal GPT-style decoder-only LM with learned positional embeddings, exposed as a Hugging Face PreTrainedModel."""
    config_class = SykoConfig

    def __init__(self, config):
        super().__init__(config)
        self.vocab_size = config.vocab_size
        self.n_embd = config.n_embd
        self.block_size = config.block_size
        self.n_head = config.n_head
        self.n_layer = config.n_layer
        self.dropout = config.dropout
        
        self.token_embedding_table = nn.Embedding(self.vocab_size, self.n_embd)
        self.position_embedding_table = nn.Embedding(self.block_size, self.n_embd)
        self.blocks = nn.Sequential(*[Block(self.n_embd, self.n_head, self.block_size, self.dropout) for _ in range(self.n_layer)])
        self.ln_f = nn.LayerNorm(self.n_embd)
        self.lm_head = nn.Linear(self.n_embd, self.vocab_size)
        
        self.apply(self._init_weights)

    def get_input_embeddings(self):
        return self.token_embedding_table

    def set_input_embeddings(self, new_embeddings):
        self.token_embedding_table = new_embeddings

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, input_ids, labels=None, **kwargs):
        # attention_mask and other kwargs are accepted but ignored; the causal
        # mask is always applied inside each Head
        idx = input_ids
        B, T = idx.shape
        device = idx.device
        
        # If the context length (T) exceeds block_size, crop to the most recent tokens (safety check)
        if T > self.block_size:
            idx = idx[:, -self.block_size:]
            T = self.block_size
            
        pos_emb = self.position_embedding_table(torch.arange(T, device=device))
        tok_emb = self.token_embedding_table(idx)
        x = tok_emb + pos_emb
        
        x = self.blocks(x)
        x = self.ln_f(x)
        logits = self.lm_head(x)

        loss = None
        if labels is not None:
            # If idx was cropped above, crop the labels to match
            if labels.shape[1] > T:
                labels = labels[:, -T:]

            # NOTE: no internal shift is applied; labels are assumed to already be
            # offset by one position (i.e., shifted in the data pipeline, not here)
            B, T, C = logits.shape
            logits_reshaped = logits.view(B * T, C)
            labels_reshaped = labels.view(B * T)
            loss = F.cross_entropy(logits_reshaped, labels_reshaped)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        # No KV cache: the full sequence is re-fed at every generation step
        # (forward() crops it to the last block_size tokens)
        return {"input_ids": input_ids}