Compare commits

No commits in common. "8d8fb8c212f707c381c1fa8ba828fafd58982e11" and "0537a5df6431ba63d32d375f2677671104761ff7" have entirely different histories.

4 changed files with 21 additions and 29 deletions

.gitignore

@@ -1,2 +0,0 @@
-*.safetensors
-*.csv

data.py

@@ -18,26 +18,27 @@ def dataWorker(q, dataset, encoding, batch_size, block_size,chat):
     batch_x, batch_y = [], []
     while True:
         for text in dataset:
-            tokens = []
+            tokens = None
             if(chat):
                 txt=""
                 for msg in text['messages']:
                     role = msg['role']
                     content = msg['content']
-                    txt = f"<|{role}|>{content}<|end|> "
-                    tokens += encoding.encode(txt) + [encoding.eos_token_id]
+                    txt = txt + f"<|{role}|>{content}<|end|>"
+                tokens = [encoding.bos_token_id]+encoding.encode(txt)
             else:
-                tokens = encoding.encode(text["text"])
+                tokens = [encoding.bos_token_id]+encoding.encode(text["text"])
             for i in range(0, len(tokens)-block_size+1,block_size):
                 x = tokens[i:i+block_size]
                 y = tokens[i+1:i+block_size+1]
                 if len(x) < block_size:
-                    pad = len(x)-(block_size)
-                    x = x + [encoding.eos_token_id] * pad
+                    pad = len(x)-(block_size-1)
+                    x = x + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
                 if len(y) < block_size:
-                    pad = len(y)-(block_size)
-                    y = y + [encoding.eos_token_id] * pad
+                    pad = len(y)-(block_size-1)
+                    y = y + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
                 batch_x.append(x)
                 batch_y.append(y)
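A note on the padding arithmetic above: when `len(x) < block_size`, the new `pad = len(x)-(block_size-1)` is still zero or negative, so the list multiplication appends nothing. A minimal sketch of the fill the hunk appears to be aiming for, one eos marker followed by pad tokens up to `block_size` (the helper name is illustrative, not from the repo):

```python
# Sketch only: top a short token list up to block_size with one eos marker
# followed by pad tokens, using the same tokenizer attributes as the loader.
def pad_to_block(seq, block_size, eos_token_id, pad_token_id):
    if len(seq) < block_size:
        fill = block_size - len(seq) - 1   # room left after the eos marker
        seq = seq + [eos_token_id] + [pad_token_id] * fill
    return seq

assert pad_to_block([1, 2, 3], 6, eos_token_id=0, pad_token_id=9) == [1, 2, 3, 0, 9, 9]
```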

@@ -1,13 +1,12 @@
 from tinygrad import Tensor,nn,TinyJit
 class MultiHeadAttention:
-    def __init__(self,embed_size,n_heads,lin):
+    def __init__(self,embed_size,n_heads):
         assert embed_size % n_heads == 0
         self.head_size = embed_size//n_heads
         self.n_heads = n_heads
         self.qkv = nn.Linear(embed_size, embed_size*3,bias=False)
         self.projection = nn.Linear(embed_size, embed_size,bias=False)
-        self.lin = lin
     def __call__(self,x):
         B,T,C=x.shape
@@ -16,16 +15,10 @@ class MultiHeadAttention:
         k = k.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         v = v.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         #B H T S
-        #TODO attention free transformer
-        out = None
-        if self.lin:
-            q = q.sigmoid()
-            k = k.sigmoid()
-            out = ((q*k).exp()/(q*k)) * v
-        else:
-            out = q.scaled_dot_product_attention(k,v,is_causal=True)
+        out = q.scaled_dot_product_attention(k,v,is_causal=True,dropout_p=0.01)
         out = out.transpose(1,2).view(B,T,C)
         return self.projection(out)
     def cast(self,dtype):
         self.qkv.weight = self.qkv.weight.cast(dtype)
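This hunk drops the experimental `lin` branch (the `#TODO attention free transformer` path) and keeps only causal scaled dot-product attention, now with `dropout_p=0.01`. A minimal shape-check sketch of the surviving path with illustrative sizes, assuming tinygrad's `Tensor.scaled_dot_product_attention`:

```python
from tinygrad import Tensor

B, T, C, n_heads = 2, 8, 64, 4               # illustrative sizes, not the repo's
head_size = C // n_heads

qkv = Tensor.randn(B, T, 3 * C)              # stand-in for the fused qkv projection
q, k, v = qkv.chunk(3, dim=-1)
q = q.view(B, T, n_heads, head_size).transpose(1, 2)   # B H T S
k = k.view(B, T, n_heads, head_size).transpose(1, 2)
v = v.view(B, T, n_heads, head_size).transpose(1, 2)

out = q.scaled_dot_product_attention(k, v, is_causal=True)  # dropout_p=0.01 in the diff
out = out.transpose(1, 2).view(B, T, C)      # merge heads back to B T C
print(out.shape)                             # (2, 8, 64)
```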
@@ -50,8 +43,8 @@ class FeedForwardNetwork:
         return self
 class Block:
-    def __init__(self,embed_size,n_heads,lin):
-        self.mha = MultiHeadAttention(embed_size,n_heads,lin)
+    def __init__(self,embed_size,n_heads):
+        self.mha = MultiHeadAttention(embed_size,n_heads)
         self.ffn = FeedForwardNetwork(embed_size)
         self.mhaNorm = nn.RMSNorm(embed_size)
         self.ffnNorm = nn.RMSNorm(embed_size)
@@ -68,9 +61,9 @@ class Transformer():
     def __init__(self,vocab_size,embed_size,n_heads,n_blocks,block_size):
         self.tok_embed = nn.Embedding(vocab_size,embed_size)
         self.pos_embed = nn.Embedding(block_size,embed_size)
-        self.pos_idx = Tensor.arange(block_size, requires_grad=False).sin()
+        self.pos_idx = Tensor.arange(block_size, requires_grad=False)
-        self.blocks = [Block(embed_size,n_heads,i%4==0) for i in range(n_blocks)]
+        self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
         self.output = nn.Linear(embed_size,vocab_size,bias=False)
     def __call__(self,x):
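On the `pos_idx` change: `nn.Embedding` is an integer-index table lookup, so the old `.sin()` turned the positions into floats before they reached `pos_embed`; the new code keeps plain integer indices. A sketch of how the positional table is presumably combined with token embeddings in `__call__` (the body sits outside this hunk, so the exact usage is an assumption):

```python
# Assumed embedding stage of Transformer.__call__ (not shown in the hunk):
def forward(self, x):                        # x: (B, T) integer token ids
    B, T = x.shape
    tok = self.tok_embed(x)                  # (B, T, embed_size)
    pos = self.pos_embed(self.pos_idx[:T])   # (T, embed_size), integer lookup
    return tok + pos                         # broadcasts over the batch dimension
```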

@@ -12,20 +12,20 @@ import sys
 hypr = {
     "embed_size": 768,
-    "n_heads": 8,
+    "n_heads": 12,
     "n_blocks": 12,
     "block_size": 512,
     "batch_size": 8,
     "starting_lr": 6e-4,
     "minimum_lr": 6e-5,
-    "warmup": 5_000,
-    "steps": 535_000,
-    "encoding": "TinyLlama/TinyLlama_v1.1",
+    "warmup": 1_000,
+    "steps": 20_000,
+    "encoding": "gpt2",
     "dataset": "HuggingFaceTB/smollm-corpus",
     "subset": "cosmopedia-v2",
     "chat_dataset": "HuggingFaceTB/smoltalk",
     "chat_subset": "all",
-    "half": True,
+    "half": False,
 }
 print(Device.DEFAULT)
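For scale, a back-of-envelope parameter count under the new config; the 4x FFN expansion, bias-free linears, and an untied output head are assumptions, since none of them appear in this diff (50257 is gpt2's standard vocab size):

```python
# Rough, assumption-laden parameter estimate for the new hyperparameters.
embed_size, n_blocks, block_size, vocab_size = 768, 12, 512, 50257

attn   = embed_size * (3 * embed_size) + embed_size * embed_size  # fused qkv + projection
ffn    = 2 * embed_size * (4 * embed_size)                        # assumed 4x up + down, no bias
embeds = vocab_size * embed_size + block_size * embed_size        # token + positional tables
head   = vocab_size * embed_size                                  # assumed untied output head

total = n_blocks * (attn + ffn) + embeds + head
print(f"~{total/1e6:.0f}M parameters")                            # ~163M under these assumptions
```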