Compare commits
No commits in common. "6f037c4a9a0aa34551de7ecb714ecc882d695a05" and "007c96e91bef03b7f7e5edafb262c6d0cf1df6e8" have entirely different histories.
6f037c4a9a...007c96e91b
4 changed files with 17 additions and 97 deletions
data.py (13 changed lines)
@@ -4,19 +4,18 @@ import queue
 def startDataWorker(dataset,encoding,batch_size,block_size):
     data_q = queue.Queue(maxsize=100)
-    t = threading.Thread(target=dataWorker, args=(data_q, dataset, encoding, batch_size, block_size), daemon=True)
+    t = threading.Thread(target=data_worker, args=(data_q, dataset, encoding, batch_size, block_size), daemon=True)
     t.start()
     while (1):
         try:
             bx, by = data_q.get(timeout=30)
         except queue.Empty:
-            print("queue empty ...")
             continue
         yield (bx,by)

 def dataWorker(q, dataset, encoding, batch_size, block_size):
     batch_x, batch_y = [], []
-    while True:
+    while(1):
         for text in dataset["text"]:
             tokens = encoding.encode(text)
             for i in range(0, len(tokens)-block_size-1,block_size):
@@ -34,7 +33,7 @@ def dataWorker(q, dataset, encoding, batch_size, block_size):
             batch_x.append(x)
             batch_y.append(y)

             if len(batch_x) == batch_size:
                 q.put((np.array(batch_x, dtype=np.int32),
                        np.array(batch_y, dtype=np.int32)))
                 batch_x, batch_y = [], []
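Review note: the new revision points the thread at data_worker while the definition shown below is still dataWorker; unless data_worker is defined elsewhere in the file, startDataWorker will raise a NameError when it builds the thread. For orientation, a minimal self-contained sketch of the bounded-queue producer/consumer pattern this file uses (the names data_worker and start_worker here are illustrative, not from the repo):

    import queue
    import threading

    def data_worker(q, stream):
        # Producer: q.put blocks once the queue holds maxsize items,
        # which throttles the producer to match consumer speed.
        for item in stream:
            q.put(item)

    def start_worker(stream):
        data_q = queue.Queue(maxsize=100)
        # daemon=True lets the process exit without joining the producer.
        t = threading.Thread(target=data_worker, args=(data_q, stream), daemon=True)
        t.start()
        while True:
            try:
                yield data_q.get(timeout=30)  # bail out of a 30 s stall and retry
            except queue.Empty:
                continue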
model.py (4 changed lines)
@@ -58,10 +58,10 @@ class Block:
         return self

 class Transformer():
-    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,block_size):
+    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,max_len):
         self.tok_embed = nn.Embedding(vocab_size,embed_size)
         self.pos_embed = nn.Embedding(block_size,embed_size)
-        self.pos_idx = Tensor.arange(block_size, requires_grad=False)
+        self.pos_idx = Tensor.arange(max_len, requires_grad=False)

         self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
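Review note: the constructor now takes max_len, but the unchanged context line still builds nn.Embedding(block_size,embed_size), so block_size is an undefined name in the new __init__ unless it exists at module level. If the rename is meant to be complete, the table would presumably be sized the same way, e.g. (an assumed follow-up, not in this diff):

    self.pos_embed = nn.Embedding(max_len, embed_size)  # assumption: rename applied here too
    self.pos_idx = Tensor.arange(max_len, requires_grad=False)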
optm.py (20 changed lines)
@@ -1,18 +1,16 @@
-from tinygrad import Tensor,nn
+from tinygrad import Tensor
 import math

 class CosineLR:
-    def __init__(self,optm,totalSteps,maxlr,minlr):
+    def __init__(self,optm,totalSteps,minlr):
         self.optm = optm
-        self.maxlr = maxlr
+        self.maxlr = optm.lr
         self.minlr = minlr
         self.totalSteps = totalSteps
         self.steps = 0

     def step(self):
-        lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((self.steps / self.totalSteps) * math.pi))
-        for o in self.optm:
-            o.lr = lr
+        self.optm.lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((step / self.totalSteps) * math.pi))
         self.optm.step()
         self.steps += 1

@@ -20,11 +18,11 @@ class CosineLR:
         self.optm.zero_grad()


-def llmOptimizer(params,steps,maxlr,minlr):
+def llmOptimizer(params,steps,minlr):
     muon_params = [p for p in params if len(p.shape) >= 2]
     adamw_params = [p for p in params if len(p.shape) < 2]

-    o1 = nn.optim.Muon(muon_params, lr=maxlr)
-    o2 = nn.optim.AdamW(adamw_params, lr=maxlr)
-    optimizer = nn.optim.OptimizerGroup(o1,o2)
-    return CosineLR(optimizer,steps,maxlr,minlr)
+    o1 = nn.optim.Muon(muon_params, lr=hypr["starting_lr"])
+    o2 = nn.optim.AdamW(adamw_params, lr=hypr["starting_lr"])
+    optimizer = nn.optim.OptimizerGroup([o1,o2])
+    return CosineLR(optimizer,steps,minlr)
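Review notes: the rewritten step() reads step, which is undefined in that scope (the old code used self.steps); llmOptimizer references hypr, a dict defined in the deleted train.py rather than in optm.py; and the import line drops nn even though nn.optim.Muon, nn.optim.AdamW, and nn.optim.OptimizerGroup are still used. Whether OptimizerGroup accepts a list argument and exposes a single settable lr in tinygrad is also worth verifying, since the old code iterated the group and set each optimizer's lr individually. A minimal corrected sketch of the cosine schedule under those assumptions:

    import math

    class CosineLR:
        def __init__(self, optm, total_steps, minlr):
            self.optm = optm
            self.maxlr = optm.lr        # peak lr read from the wrapped optimizer
            self.minlr = minlr
            self.total_steps = total_steps
            self.steps = 0

        def step(self):
            # Half-cosine decay from maxlr down to minlr over total_steps.
            frac = self.steps / self.total_steps
            self.optm.lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos(frac * math.pi))
            self.optm.step()
            self.steps += 1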
train.py (77 changed lines, file deleted)
@@ -1,77 +0,0 @@
-from concurrent.futures import ThreadPoolExecutor
-from tinygrad import Tensor,TinyJit,Device,nn
-from tinygrad.nn.state import get_state_dict
-from model import Transformer
-from transformers import AutoTokenizer
-from datasets import load_dataset
-from tqdm import tqdm
-import optm
-import data
-import log
-
-hypr = {
-    "embed_size": 256,
-    "n_heads": 4,
-    "n_blocks": 4,
-    "block_size": 256,
-    "batch_size": 16,
-    "starting_lr": 3e-4,
-    "minimum_lr": 3e-5,
-    "warmup": 1_000,
-    "steps": 5_000,
-    "encoding": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
-    "dataset": "HuggingFaceTB/smollm-corpus",
-    "subset": "cosmopedia-v2",
-}
-
-print(Device.DEFAULT)
-
-#for loging
-loger = ThreadPoolExecutor(max_workers=2)
-
-dataset = load_dataset(hypr["dataset"],
-                       hypr["subset"],
-                       split="train",
-                       streaming=True)
-encoding = AutoTokenizer.from_pretrained(hypr["encoding"])
-hypr["vocab_size"] = encoding.vocab_size
-model = Transformer(hypr["vocab_size"],hypr["embed_size"],hypr["n_heads"],hypr["n_blocks"],hypr["block_size"])
-batch = data.startDataWorker(dataset,encoding,hypr["batch_size"],hypr["block_size"])
-
-params = nn.state.get_parameters(model)
-optimizer = optm.llmOptimizer(params,hypr["steps"],hypr["starting_lr"],hypr["minimum_lr"])
-
-@TinyJit
-def step(x,y):
-    optimizer.zero_grad()
-
-    logits = model(x)
-    B,T,C = logits.shape
-    logits = logits.view(B*T,C)
-    y = y.view(B*T)
-    loss = logits.cross_entropy(y)
-
-    loss.backward()
-    optimizer.step()
-    return loss
-
-Tensor.training=True
-bar = tqdm(range(hypr["steps"]))
-
-for steps in bar:
-    nx, ny = next(batch)
-    x = Tensor(nx, device=Device.DEFAULT).realize()
-    y = Tensor(ny, device=Device.DEFAULT).realize()
-    loss = step(x, y)
-    if steps % 10 == 0:
-        l = loss.numpy()
-        loger.submit(log.logLoss, steps, l)
-        bar.set_postfix(loss= f"{l:.4f}")
-    if steps % 500 == 0:
-        loss.realize()
-        m = get_state_dict(model)
-        log.logModel(steps,m)
-        #TODO non sycronus safetensor loging
-        #loger.submit(log.logModel,steps,m)
-
-loger.shutdown(wait=True)
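Review note: with train.py removed, nothing left in this compare constructs Transformer, consumes startDataWorker, or defines the hypr dict that the new optm.py reads; presumably a replacement entry point lives outside these four files.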