diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4f4d16f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+*.safetensors
+*.csv
diff --git a/data.py b/data.py
index 8bef979..d3414a5 100644
--- a/data.py
+++ b/data.py
@@ -18,27 +18,26 @@ def dataWorker(q, dataset, encoding, batch_size, block_size,chat):
     batch_x, batch_y = [], []
     while True:
         for text in dataset:
-            tokens = None
+            tokens = []
             if(chat):
-                txt=""
                 for msg in text['messages']:
                     role = msg['role']
                     content = msg['content']
-                    txt = txt + f"<|{role}|>{content}<|end|>"
-                tokens = [encoding.bos_token_id]+encoding.encode(txt)
+                    txt = f"<|{role}|>{content}<|end|> "
+                    tokens += encoding.encode(txt) + [encoding.eos_token_id]
             else:
-                tokens = [encoding.bos_token_id]+encoding.encode(text["text"])
+                tokens = encoding.encode(text["text"])
 
             for i in range(0, len(tokens)-block_size+1,block_size):
                 x = tokens[i:i+block_size]
                 y = tokens[i+1:i+block_size+1]
 
                 if len(x) < block_size:
-                    pad = len(x)-(block_size-1)
-                    x = x + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
+                    pad = block_size-len(x)
+                    x = x + [encoding.eos_token_id] * pad
                 if len(y) < block_size:
-                    pad = len(y)-(block_size-1)
-                    y = y + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
+                    pad = block_size-len(y)
+                    y = y + [encoding.eos_token_id] * pad
 
                 batch_x.append(x)
                 batch_y.append(y)
diff --git a/model.py b/model.py
index 8df1c46..e484430 100644
--- a/model.py
+++ b/model.py
@@ -1,12 +1,13 @@
 from tinygrad import Tensor,nn,TinyJit
 
 class MultiHeadAttention:
-    def __init__(self,embed_size,n_heads):
+    def __init__(self,embed_size,n_heads,lin):
         assert embed_size % n_heads == 0
         self.head_size = embed_size//n_heads
         self.n_heads = n_heads
         self.qkv = nn.Linear(embed_size, embed_size*3,bias=False)
         self.projection = nn.Linear(embed_size, embed_size,bias=False)
+        self.lin = lin
     def __call__(self,x):
         B,T,C=x.shape
@@ -15,10 +16,16 @@ class MultiHeadAttention:
         k = k.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         v = v.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         #B H T S
-        #TODO attention free transformer
-        out = q.scaled_dot_product_attention(k,v,is_causal=True,dropout_p=0.01)
+        out = None
+        if self.lin:
+            q = q.sigmoid()
+            k = k.sigmoid()
+            out = ((q*k).exp()/(q*k)) * v
+        else:
+            out = q.scaled_dot_product_attention(k,v,is_causal=True)
         out = out.transpose(1,2).view(B,T,C)
+
         return self.projection(out)
 
     def cast(self,dtype):
         self.qkv.weight = self.qkv.weight.cast(dtype)
@@ -43,8 +50,8 @@ class FeedForwardNetwork:
         return self
 
 class Block:
-    def __init__(self,embed_size,n_heads):
-        self.mha = MultiHeadAttention(embed_size,n_heads)
+    def __init__(self,embed_size,n_heads,lin):
+        self.mha = MultiHeadAttention(embed_size,n_heads,lin)
         self.ffn = FeedForwardNetwork(embed_size)
         self.mhaNorm = nn.RMSNorm(embed_size)
         self.ffnNorm = nn.RMSNorm(embed_size)
@@ -61,9 +68,9 @@ class Transformer():
     def __init__(self,vocab_size,embed_size,n_heads,n_blocks,block_size):
         self.tok_embed = nn.Embedding(vocab_size,embed_size)
         self.pos_embed = nn.Embedding(block_size,embed_size)
-        self.pos_idx = Tensor.arange(block_size, requires_grad=False)
+        self.pos_idx = Tensor.arange(block_size, requires_grad=False).sin()
 
-        self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
+        self.blocks = [Block(embed_size,n_heads,i%4==0) for i in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
         self.output = nn.Linear(embed_size,vocab_size,bias=False)
     def __call__(self,x):
diff --git a/train.py b/train.py
index c0c7f3e..80cfb27 100644
--- a/train.py
+++ b/train.py
@@ -12,20 +12,20 @@ import sys
 
 hypr = {
     "embed_size": 768,
-    "n_heads": 12,
+    "n_heads": 8,
     "n_blocks": 12,
     "block_size": 512,
     "batch_size": 8,
     "starting_lr": 6e-4,
     "minimum_lr": 6e-5,
-    "warmup": 1_000,
-    "steps": 20_000,
-    "encoding": "gpt2",
+    "warmup": 5_000,
+    "steps": 535_000,
+    "encoding": "TinyLlama/TinyLlama_v1.1",
     "dataset": "HuggingFaceTB/smollm-corpus",
     "subset": "cosmopedia-v2",
     "chat_dataset": "HuggingFaceTB/smoltalk",
     "chat_subset": "all",
-    "half": False,
+    "half": True,
 }
 
 print(Device.DEFAULT)
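
In `dataWorker`, each token stream is cut into `block_size`-sized input blocks `x` with targets `y` shifted one position to the right; a target block that comes up short at the end of the stream is padded with EOS so the batch stays rectangular. A minimal self-contained sketch of that pairing logic (the names `make_pairs` and `eos_id` are illustrative, not from the repo):

```python
# Sketch of the (x, y) pair construction in data.py's dataWorker.
def make_pairs(tokens, block_size, eos_id):
    pairs = []
    for i in range(0, len(tokens) - block_size + 1, block_size):
        x = tokens[i:i + block_size]          # input block; always full with this stride
        y = tokens[i + 1:i + block_size + 1]  # next-token targets; may be one short
        y = y + [eos_id] * (block_size - len(y))  # EOS-pad the short tail
        pairs.append((x, y))
    return pairs

# With 8 tokens and block_size 4, the last target block gets EOS-padded:
print(make_pairs(list(range(1, 9)), block_size=4, eos_id=0))
# -> [([1, 2, 3, 4], [2, 3, 4, 5]), ([5, 6, 7, 8], [6, 7, 8, 0])]
```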
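In `model.py`, every fourth block (`i%4==0`, so blocks 0, 4, and 8 of the 12) now takes the `lin` path, which replaces softmax attention with a purely elementwise map: both gates go through a sigmoid, so `q*k` is strictly positive and `exp(g)/g` is finite. Note that nothing in this branch mixes information across the `T` axis; each position is rescaled independently. A NumPy sketch of just that branch, using the diff's shapes (the helper name is mine):

```python
import numpy as np

def lin_branch(q, k, v):
    """Elementwise `lin` path from MultiHeadAttention.__call__.
    q, k, v have shape (B, H, T, S); no T x T attention matrix is formed."""
    q = 1.0 / (1.0 + np.exp(-q))  # q.sigmoid()
    k = 1.0 / (1.0 + np.exp(-k))  # k.sigmoid()
    g = q * k                     # elementwise gate, values in (0, 1)
    return (np.exp(g) / g) * v    # ((q*k).exp()/(q*k)) * v

# Shapes from the diff: batch 8, 8 heads, block 512, head_size 768 // 8 = 96.
q, k, v = [np.random.randn(8, 8, 512, 96) for _ in range(3)]
assert lin_branch(q, k, v).shape == (8, 8, 512, 96)
```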
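On the `encoding` switch in `train.py`: the value moves from the GPT-2 vocabulary (50,257 tokens) to TinyLlama's Llama-2 BPE (32,000 tokens), which shrinks the model's `vocab_size` and with it the embedding and output matrices. The loader itself is outside this diff; assuming it resolves the name with `transformers.AutoTokenizer` (consistent with data.py's use of `encoding.encode` and `encoding.eos_token_id`), both the old and new values load through the same call:

```python
from transformers import AutoTokenizer

# Assumption: hypr["encoding"] is resolved like this; the actual loading
# code in train.py is not part of the diff.
old = AutoTokenizer.from_pretrained("gpt2")
new = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama_v1.1")
print(len(old), len(new))  # ~50257 vs ~32000, plus any added special tokens
```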