Compare commits


15 Commits

Author  SHA1        Message                                                    Date
k       98282675b0  Fixed embedding mistake                                    2026-03-05 15:13:34 -05:00
k       8d8fb8c212  Experimental "lin" blocks instead of attention, sparsely   2026-02-27 09:13:24 -05:00
k       89c9d01cb8  More training with fewer heads                             2026-02-27 09:13:01 -05:00
k       dc231ae703  Fixed BOS token being prepended twice                      2026-02-27 09:11:33 -05:00
k       a0cd98876c  Added .gitignore                                           2026-01-13 21:11:32 -05:00
k       0537a5df64  Changed chat dataset                                       2026-01-09 17:30:34 -05:00
k       c78a31362a  Set to GPT-2 hyperparameters                               2026-01-09 12:45:01 -05:00
k       496916f428  Added fine-tuning                                          2026-01-07 13:01:06 -05:00
k       121640bab6  Updated hyperparameters for my GPU                         2026-01-07 12:59:44 -05:00
k       6f037c4a9a  Quick training script                                      2026-01-07 02:14:09 -05:00
k       7f25dff1d1  Fix errors                                                 2026-01-07 02:13:08 -05:00
k       007c96e91b  Simple log functions                                       2026-01-07 01:25:47 -05:00
k       6daa8ec46c  Added code to generate training batches                    2026-01-07 01:15:18 -05:00
k       229c564811  CosineAnnealing with optimizer group                       2026-01-07 00:26:04 -05:00
k       478010c8cc  Added positional encodings                                 2026-01-06 21:38:12 -05:00
6 changed files with 211 additions and 8 deletions

.gitignore (vendored, new file, 2 additions)

@@ -0,0 +1,2 @@
*.safetensors
*.csv

data.py (new file, 48 additions)

@@ -0,0 +1,48 @@
import numpy as np
import threading
import queue

def startDataWorker(dataset, encoding, batch_size, block_size, chat):
    # Spawn a daemon thread that tokenizes and batches, then yield batches as they arrive.
    data_q = queue.Queue(maxsize=100)
    t = threading.Thread(target=dataWorker, args=(data_q, dataset, encoding, batch_size, block_size, chat), daemon=True)
    t.start()
    while True:
        try:
            bx, by = data_q.get(timeout=30)
        except queue.Empty:
            print("queue empty ...")
            continue
        yield (bx, by)

def dataWorker(q, dataset, encoding, batch_size, block_size, chat):
    batch_x, batch_y = [], []
    while True:
        for text in dataset:
            tokens = []
            if chat:
                # Flatten a chat sample into role-tagged text before tokenizing.
                for msg in text['messages']:
                    role = msg['role']
                    content = msg['content']
                    txt = f"<|{role}|>{content}<|end|> "
                    tokens += encoding.encode(txt) + [encoding.eos_token_id]
            else:
                tokens = encoding.encode(text["text"])
            for i in range(0, len(tokens) - block_size + 1, block_size):
                x = tokens[i:i + block_size]
                y = tokens[i + 1:i + block_size + 1]
                # Pad up to block_size with EOS; y can come up one token short at the end of the stream.
                if len(x) < block_size:
                    x = x + [encoding.eos_token_id] * (block_size - len(x))
                if len(y) < block_size:
                    y = y + [encoding.eos_token_id] * (block_size - len(y))
                batch_x.append(x)
                batch_y.append(y)
                if len(batch_x) == batch_size:
                    q.put((np.array(batch_x, dtype=np.int32),
                           np.array(batch_y, dtype=np.int32)))
                    batch_x, batch_y = [], []

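A minimal usage sketch for the streaming batcher; the dataset and tokenizer mirror what train.py constructs, and the sizes are the ones from its hyperparameters:

from datasets import load_dataset
from transformers import AutoTokenizer
import data

encoding = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama_v1.1")
dataset = load_dataset("HuggingFaceTB/smollm-corpus", "cosmopedia-v2",
                       split="train", streaming=True)
batches = data.startDataWorker(dataset, encoding, batch_size=8, block_size=512, chat=False)
bx, by = next(batches)  # int32 arrays of shape (8, 512); by is bx shifted left by one token
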
log.py (new file, 15 additions)

@@ -0,0 +1,15 @@
from tinygrad.nn.state import safe_save
import csv
import os

def logLoss(step, loss):
    # Append (step, loss) to loss.csv, writing the header row on first use.
    path = "loss.csv"
    exists = os.path.isfile(path)
    with open(path, mode='a', newline='') as f:
        writer = csv.writer(f)
        if not exists:
            writer.writerow(['step', 'loss'])
        writer.writerow([step, float(loss)])

def logModel(step, stateDict):
    # Snapshot the weights as a safetensors checkpoint, e.g. gpt_500.safetensors.
    safe_save(stateDict, f"gpt_{step}.safetensors")

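A quick sketch for reading back what logLoss writes, assuming a loss.csv already exists in the working directory:

import csv
with open("loss.csv") as f:
    rows = list(csv.DictReader(f))
print(f"last logged: step {rows[-1]['step']}, loss {rows[-1]['loss']}")
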
model.py (modified, 20 additions, 8 deletions)

@@ -1,12 +1,13 @@
 from tinygrad import Tensor,nn,TinyJit
 class MultiHeadAttention:
-    def __init__(self,embed_size,n_heads):
+    def __init__(self,embed_size,n_heads,lin):
         assert embed_size % n_heads == 0
         self.head_size = embed_size//n_heads
         self.n_heads = n_heads
         self.qkv = nn.Linear(embed_size, embed_size*3,bias=False)
         self.projection = nn.Linear(embed_size, embed_size,bias=False)
+        self.lin = lin
     def __call__(self,x):
         B,T,C=x.shape
@@ -15,10 +16,16 @@ class MultiHeadAttention:
         k = k.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         v = v.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         #B H T S
-        #TODO attention free transformer
-        out = q.scaled_dot_product_attention(k,v,is_causal=True,dropout_p=0.01)
+        out = None
+        if self.lin:
+            q = q.sigmoid()
+            k = k.sigmoid()
+            out = ((q*k).exp()/(q*k)) * v
+        else:
+            out = q.scaled_dot_product_attention(k,v,is_causal=True)
         out = out.transpose(1,2).view(B,T,C)
         return self.projection(out)
     def cast(self,dtype):
         self.qkv.weight = self.qkv.weight.cast(dtype)
@@ -43,8 +50,8 @@ class FeedForwardNetwork:
         return self
 class Block:
-    def __init__(self,embed_size,n_heads):
-        self.mha = MultiHeadAttention(embed_size,n_heads)
+    def __init__(self,embed_size,n_heads,lin):
+        self.mha = MultiHeadAttention(embed_size,n_heads,lin)
         self.ffn = FeedForwardNetwork(embed_size)
         self.mhaNorm = nn.RMSNorm(embed_size)
         self.ffnNorm = nn.RMSNorm(embed_size)
@@ -58,13 +65,18 @@ class Block:
         return self
 class Transformer():
-    def __init__(self,vocab_size,embed_size,n_heads,n_blocks):
+    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,block_size):
         self.tok_embed = nn.Embedding(vocab_size,embed_size)
-        self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
+        self.pos_embed = nn.Embedding(block_size,embed_size)
+        self.pos_idx = Tensor.arange(block_size, requires_grad=False)
+        self.blocks = [Block(embed_size,n_heads,i%4==0) for i in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
         self.output = nn.Linear(embed_size,vocab_size,bias=False)
     def __call__(self,x):
-        x = self.tok_embed(x)
+        B,T = x.shape
+        pos_embeds = self.pos_embed(self.pos_idx[:T])
+        x = self.tok_embed(x) + pos_embeds
         x = x.sequential(self.blocks)
         x = self.norm(x)
         return self.output(x)

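A hedged smoke test for the updated Transformer signature; vocab_size assumes TinyLlama's 32,000-token tokenizer and the other sizes match train.py. Every fourth block (i % 4 == 0) takes the experimental "lin" path instead of scaled dot-product attention:

from tinygrad import Tensor
from model import Transformer

model = Transformer(vocab_size=32000, embed_size=768, n_heads=8, n_blocks=12, block_size=512)
tokens = Tensor([[1, 2, 3, 4]])  # (B=1, T=4) token ids; T can be anything up to block_size
logits = model(tokens)           # -> (1, 4, 32000) next-token logits
print(logits.shape)
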
optm.py (new file, 30 additions)

@@ -0,0 +1,30 @@
from tinygrad import Tensor, nn
import math

class CosineLR:
    # Cosine-annealing learning-rate schedule applied to every optimizer in the group.
    def __init__(self, optm, totalSteps, maxlr, minlr):
        self.optm = optm
        self.maxlr = maxlr
        self.minlr = minlr
        self.totalSteps = totalSteps
        self.steps = 0

    def step(self):
        # Anneal from maxlr at step 0 down to minlr at totalSteps.
        lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((self.steps / self.totalSteps) * math.pi))
        for o in self.optm:
            # assign() writes into the existing lr Tensor in place, so the new value
            # is still picked up inside a TinyJit-compiled training step
            o.lr.assign(o.lr.full_like(lr)).realize()
        self.optm.step()
        self.steps += 1

    def zero_grad(self):
        self.optm.zero_grad()

def llmOptimizer(params, steps, maxlr, minlr):
    # Muon for matrix-shaped (2D+) weights, AdamW for the rest (e.g. norm gains).
    muon_params = [p for p in params if len(p.shape) >= 2]
    adamw_params = [p for p in params if len(p.shape) < 2]
    o1 = nn.optim.Muon(muon_params, lr=maxlr)
    o2 = nn.optim.AdamW(adamw_params, lr=maxlr)
    optimizer = nn.optim.OptimizerGroup(o1, o2)
    return CosineLR(optimizer, steps, maxlr, minlr)

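A worked check of CosineLR's schedule in plain Python, using train.py's pretraining values (maxlr 6e-4, minlr 6e-5, 535,000 steps): the cosine term starts at 1, so lr begins at maxlr, passes through the midpoint halfway, and lands on minlr at the final step.

import math

def cosine_lr(step, total, maxlr=6e-4, minlr=6e-5):
    return minlr + 0.5 * (maxlr - minlr) * (1 + math.cos((step / total) * math.pi))

print(cosine_lr(0, 535_000))        # 6.0e-04: starts at maxlr
print(cosine_lr(267_500, 535_000))  # 3.3e-04: midpoint of maxlr and minlr
print(cosine_lr(535_000, 535_000))  # 6.0e-05: ends at minlr
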
train.py (new file, 96 additions)

@@ -0,0 +1,96 @@
from tinygrad.nn.state import get_state_dict,safe_load, load_state_dict
from concurrent.futures import ThreadPoolExecutor
from tinygrad import Tensor,TinyJit,Device,nn
from transformers import AutoTokenizer
from datasets import load_dataset
from model import Transformer
from tqdm import tqdm
import optm
import data
import log
import sys
hypr = {
"embed_size": 768,
"n_heads": 8,
"n_blocks": 12,
"block_size": 512,
"batch_size": 8,
"starting_lr": 6e-4,
"minimum_lr": 6e-5,
"warmup": 5_000,
"steps": 535_000,
"encoding": "TinyLlama/TinyLlama_v1.1",
"dataset": "HuggingFaceTB/smollm-corpus",
"subset": "cosmopedia-v2",
"chat_dataset": "HuggingFaceTB/smoltalk",
"chat_subset": "all",
"half": True,
}
print(Device.DEFAULT)
chat = len(sys.argv) > 1
if chat:
    hypr["dataset"] = hypr["chat_dataset"]
    hypr["subset"] = hypr["chat_subset"]
    hypr["starting_lr"] *= 0.1
    hypr["minimum_lr"] *= 0.1
# thread pool for asynchronous logging
logger = ThreadPoolExecutor(max_workers=2)
dataset = load_dataset(hypr["dataset"],
                       hypr["subset"],
                       split="train",
                       streaming=True)
encoding = AutoTokenizer.from_pretrained(hypr["encoding"])
if encoding.pad_token_id is None:
    encoding.pad_token_id = encoding.eos_token_id
hypr["vocab_size"] = encoding.vocab_size
batch = data.startDataWorker(dataset,encoding,hypr["batch_size"],hypr["block_size"],chat)
model = Transformer(hypr["vocab_size"],hypr["embed_size"],hypr["n_heads"],hypr["n_blocks"],hypr["block_size"])
if chat:
    # resume from the checkpoint passed on the command line for fine-tuning
    load_state_dict(model, safe_load(sys.argv[1]))
if hypr["half"]:
    from tinygrad import dtypes
    model = model.cast(dtypes.float16)
params = nn.state.get_parameters(model)
optimizer = optm.llmOptimizer(params,hypr["steps"],hypr["starting_lr"],hypr["minimum_lr"])
@TinyJit
def step(x, y):
    # one JIT-compiled training step: forward, flattened cross-entropy, backward, update
    optimizer.zero_grad()
    logits = model(x)
    B, T, C = logits.shape
    logits = logits.view(B*T, C)
    y = y.view(B*T)
    loss = logits.cross_entropy(y)
    loss.backward()
    optimizer.step()
    return loss
Tensor.training = True
bar = tqdm(range(hypr["steps"]))
for steps in bar:
    nx, ny = next(batch)
    x = Tensor(nx, device=Device.DEFAULT).realize()
    y = Tensor(ny, device=Device.DEFAULT).realize()
    loss = step(x, y)
    if steps % 10 == 0:
        l = loss.numpy()
        logger.submit(log.logLoss, steps, l)
        bar.set_postfix(loss=f"{l:.4f}")
    if steps % 500 == 0:
        loss.realize()
        m = get_state_dict(model)
        log.logModel(steps, m)
        # TODO: non-synchronous safetensors logging
        # logger.submit(log.logModel, steps, m)
m = get_state_dict(model)
log.logModel("final", m)
logger.shutdown(wait=True)
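
For scale, a back-of-envelope parameter count under these hyperparameters, assuming TinyLlama's 32,000-token vocabulary and a conventional 4*embed_size FFN width (the FFN width is not visible in this diff):

embed, n_blocks, vocab, block = 768, 12, 32000, 512
attn = embed * (3 * embed) + embed * embed  # qkv + projection, both bias-free
ffn = 2 * embed * (4 * embed)               # assumed up- and down-projections
total = vocab * embed + block * embed + n_blocks * (attn + ffn) + embed * vocab
print(f"~{total / 1e6:.0f}M parameters")    # ~134M, roughly GPT-2-small territory

Invocation follows the argv check at the top of train.py: python train.py pretrains on smollm-corpus, while python train.py <checkpoint>.safetensors loads the weights and fine-tunes on smoltalk at a tenth of the learning rate.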