Compare commits

...

2 Commits

Author  SHA1        Message                Date
k       6f037c4a9a  Quick training script  2026-01-07 02:14:09 -05:00
k       7f25dff1d1  Fix errors             2026-01-07 02:13:08 -05:00
4 changed files with 97 additions and 17 deletions

data.py (13 changed lines)

@@ -4,18 +4,19 @@ import queue
 def startDataWorker(dataset,encoding,batch_size,block_size):
     data_q = queue.Queue(maxsize=100)
-    t = threading.Thread(target=data_worker, args=(data_q, dataset, encoding, batch_size, block_size), daemon=True)
+    t = threading.Thread(target=dataWorker, args=(data_q, dataset, encoding, batch_size, block_size), daemon=True)
     t.start()
     while (1):
         try:
             bx, by = data_q.get(timeout=30)
         except queue.Empty:
             print("queue empty ...")
             continue
         yield (bx,by)
 def dataWorker(q, dataset, encoding, batch_size, block_size):
     batch_x, batch_y = [], []
-    while(1):
+    while True:
         for text in dataset["text"]:
             tokens = encoding.encode(text)
             for i in range(0, len(tokens)-block_size-1,block_size):
@@ -33,7 +34,7 @@ def dataWorker(q, dataset, encoding, batch_size, block_size):
                 batch_x.append(x)
                 batch_y.append(y)
-            if len(batch_x) == batch_size:
-                q.put((np.array(batch_x, dtype=np.int32),
-                       np.array(batch_y, dtype=np.int32)))
-                batch_x, batch_y = [], []
+                if len(batch_x) == batch_size:
+                    q.put((np.array(batch_x, dtype=np.int32),
+                           np.array(batch_y, dtype=np.int32)))
+                    batch_x, batch_y = [], []
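A note on the hunk above: the stride in range(0, len(tokens)-block_size-1, block_size) implies the standard next-token setup, where each block_size window of tokens is paired with the same window shifted right by one. The slicing itself sits between the two hunks and is not shown in this diff; the sketch below is a hypothetical reconstruction of it:

    # Hypothetical reconstruction of the elided slicing inside dataWorker's
    # inner loop: y[j] is the training target for x[j].
    for i in range(0, len(tokens) - block_size - 1, block_size):
        x = tokens[i : i + block_size]          # input window
        y = tokens[i + 1 : i + block_size + 1]  # same window shifted by one
        batch_x.append(x)
        batch_y.append(y)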

model.py

@@ -58,10 +58,10 @@ class Block:
         return self
 class Transformer():
-    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,max_len):
+    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,block_size):
         self.tok_embed = nn.Embedding(vocab_size,embed_size)
         self.pos_embed = nn.Embedding(block_size,embed_size)
-        self.pos_idx = Tensor.arange(max_len, requires_grad=False)
+        self.pos_idx = Tensor.arange(block_size, requires_grad=False)
         self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
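The rename here is the actual bug fix: the old signature took max_len, but the unchanged body already referenced block_size in nn.Embedding(block_size,embed_size), so constructing the model would raise a NameError. For context, a minimal sketch of how the fields above typically combine in the forward pass (the real __call__ is outside this diff, so the body below is an assumption):

    # Hypothetical forward pass: sum token and positional embeddings, then
    # run the blocks. Only fields visible in the diff above are used.
    def __call__(self, x):
        B, T = x.shape
        h = self.tok_embed(x) + self.pos_embed(self.pos_idx[:T])
        for block in self.blocks:
            h = block(h)
        return self.norm(h)  # a vocab projection (not visible in the diff) would follow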

optm.py (20 changed lines)

@@ -1,16 +1,18 @@
-from tinygrad import Tensor
+from tinygrad import Tensor,nn
 import math
 class CosineLR:
-    def __init__(self,optm,totalSteps,minlr):
+    def __init__(self,optm,totalSteps,maxlr,minlr):
         self.optm = optm
-        self.maxlr = optm.lr
+        self.maxlr = maxlr
         self.minlr = minlr
         self.totalSteps = totalSteps
         self.steps = 0
     def step(self):
-        self.optm.lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((step / self.totalSteps) * math.pi))
+        lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((self.steps / self.totalSteps) * math.pi))
+        for o in self.optm:
+            o.lr = lr
         self.optm.step()
         self.steps += 1
@@ -18,11 +20,11 @@ class CosineLR:
         self.optm.zero_grad()
-def llmOptimizer(params,steps,minlr):
+def llmOptimizer(params,steps,maxlr,minlr):
     muon_params = [p for p in params if len(p.shape) >= 2]
     adamw_params = [p for p in params if len(p.shape) < 2]
-    o1 = nn.optim.Muon(muon_params, lr=hypr["starting_lr"])
-    o2 = nn.optim.AdamW(adamw_params, lr=hypr["starting_lr"])
-    optimizer = nn.optim.OptimizerGroup([o1,o2])
-    return CosineLR(optimizer,steps,minlr)
+    o1 = nn.optim.Muon(muon_params, lr=maxlr)
+    o2 = nn.optim.AdamW(adamw_params, lr=maxlr)
+    optimizer = nn.optim.OptimizerGroup(o1,o2)
+    return CosineLR(optimizer,steps,maxlr,minlr)
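Two things worth spelling out in this file. First, the parameter split: tensors with ndim >= 2 (weight matrices, including the embedding tables) go to Muon, everything else (norm gains, biases) goes to AdamW. Second, CosineLR.step is a plain cosine decay from maxlr to minlr: at step 0 the cosine term is 1 and the learning rate is maxlr, at the midpoint it is (maxlr + minlr) / 2, and at totalSteps it bottoms out at minlr. A quick standalone check with the values train.py passes in:

    # Sanity check of the CosineLR formula with starting_lr=3e-4,
    # minimum_lr=3e-5, steps=5000 (the train.py hyperparameters).
    import math
    maxlr, minlr, total = 3e-4, 3e-5, 5_000
    lr = lambda s: minlr + 0.5 * (maxlr - minlr) * (1 + math.cos((s / total) * math.pi))
    print(lr(0))      # 3.0e-4   (starts at maxlr)
    print(lr(2_500))  # 1.65e-4  ((maxlr + minlr) / 2 at the midpoint)
    print(lr(5_000))  # 3.0e-5   (ends at minlr)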

train.py (new file, 77 added lines)

@@ -0,0 +1,77 @@
+from concurrent.futures import ThreadPoolExecutor
+from tinygrad import Tensor,TinyJit,Device,nn
+from tinygrad.nn.state import get_state_dict
+from model import Transformer
+from transformers import AutoTokenizer
+from datasets import load_dataset
+from tqdm import tqdm
+import optm
+import data
+import log
+hypr = {
+    "embed_size": 256,
+    "n_heads": 4,
+    "n_blocks": 4,
+    "block_size": 256,
+    "batch_size": 16,
+    "starting_lr": 3e-4,
+    "minimum_lr": 3e-5,
+    "warmup": 1_000,
+    "steps": 5_000,
+    "encoding": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+    "dataset": "HuggingFaceTB/smollm-corpus",
+    "subset": "cosmopedia-v2",
+}
+print(Device.DEFAULT)
+# for logging
+loger = ThreadPoolExecutor(max_workers=2)
+dataset = load_dataset(hypr["dataset"],
+                       hypr["subset"],
+                       split="train",
+                       streaming=True)
+encoding = AutoTokenizer.from_pretrained(hypr["encoding"])
+hypr["vocab_size"] = encoding.vocab_size
+model = Transformer(hypr["vocab_size"],hypr["embed_size"],hypr["n_heads"],hypr["n_blocks"],hypr["block_size"])
+batch = data.startDataWorker(dataset,encoding,hypr["batch_size"],hypr["block_size"])
+params = nn.state.get_parameters(model)
+optimizer = optm.llmOptimizer(params,hypr["steps"],hypr["starting_lr"],hypr["minimum_lr"])
+@TinyJit
+def step(x,y):
+    optimizer.zero_grad()
+    logits = model(x)
+    B,T,C = logits.shape
+    logits = logits.view(B*T,C)
+    y = y.view(B*T)
+    loss = logits.cross_entropy(y)
+    loss.backward()
+    optimizer.step()
+    return loss
+Tensor.training=True
+bar = tqdm(range(hypr["steps"]))
+for steps in bar:
+    nx, ny = next(batch)
+    x = Tensor(nx, device=Device.DEFAULT).realize()
+    y = Tensor(ny, device=Device.DEFAULT).realize()
+    loss = step(x, y)
+    if steps % 10 == 0:
+        l = loss.numpy()
+        loger.submit(log.logLoss, steps, l)
+        bar.set_postfix(loss=f"{l:.4f}")
+    if steps % 500 == 0:
+        loss.realize()
+        m = get_state_dict(model)
+        log.logModel(steps,m)
+        # TODO: non-synchronous safetensors logging
+        # loger.submit(log.logModel,steps,m)
+loger.shutdown(wait=True)
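On the trailing TODO: submitting log.logModel straight to the executor would hand a live state dict to another thread while training keeps mutating the weights. One way to make the checkpoint non-blocking is to snapshot the weights to host memory first and only push the file write to the pool. A minimal sketch, assuming tinygrad's safe_save; the helper name and file path are made up:

    # Hypothetical async checkpoint: .numpy() is a blocking device-to-host
    # copy, so the snapshot is detached from training before the pool writes
    # it out as a safetensors file.
    from tinygrad import Tensor
    from tinygrad.nn.state import get_state_dict, safe_save

    def checkpointAsync(step, model, pool):
        snapshot = {k: Tensor(v.numpy()) for k, v in get_state_dict(model).items()}
        pool.submit(safe_save, snapshot, f"model_{step}.safetensors")

Inside the loop this would replace the synchronous call: checkpointAsync(steps, model, loger).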