Compare commits
11 Commits: 007c96e91b ... main
| Author | SHA1 | Date |
|---|---|---|
| | 98282675b0 | |
| | 8d8fb8c212 | |
| | 89c9d01cb8 | |
| | dc231ae703 | |
| | a0cd98876c | |
| | 0537a5df64 | |
| | c78a31362a | |
| | 496916f428 | |
| | 121640bab6 | |
| | 6f037c4a9a | |
| | 7f25dff1d1 | |
.gitignore (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
+*.safetensors
+*.csv
data.py (35 lines changed)

@@ -2,33 +2,42 @@ import numpy as np
 import threading
 import queue

-def startDataWorker(dataset,encoding,batch_size,block_size):
+def startDataWorker(dataset,encoding,batch_size,block_size,chat):
     data_q = queue.Queue(maxsize=100)
-    t = threading.Thread(target=data_worker, args=(data_q, dataset, encoding, batch_size, block_size), daemon=True)
+    t = threading.Thread(target=dataWorker, args=(data_q, dataset, encoding, batch_size, block_size,chat), daemon=True)
     t.start()
     while (1):
         try:
             bx, by = data_q.get(timeout=30)
         except queue.Empty:
             print("queue empty ...")
             continue
         yield (bx,by)

-def dataWorker(q, dataset, encoding, batch_size, block_size):
+def dataWorker(q, dataset, encoding, batch_size, block_size,chat):
     batch_x, batch_y = [], []
-    while(1):
-        for text in dataset["text"]:
-            tokens = encoding.encode(text)
-            for i in range(0, len(tokens)-block_size-1,block_size):
-                x = [encoding.bos_token_id] + tokens[i:i+block_size-1]
-                y = tokens[i:i+block_size]
+    while True:
+        for text in dataset:
+            tokens = []
+            if(chat):
+                for msg in text['messages']:
+                    role = msg['role']
+                    content = msg['content']
+                    txt = f"<|{role}|>{content}<|end|> "
+                    tokens += encoding.encode(txt) + [encoding.eos_token_id]
+            else:
+                tokens = encoding.encode(text["text"])
+            for i in range(0, len(tokens)-block_size+1,block_size):
+                x = tokens[i:i+block_size]
+                y = tokens[i+1:i+block_size+1]

                 if len(x) < block_size:
-                    pad = len(x)-(block_size-1)
-                    x = x + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
+                    pad = len(x)-(block_size)
+                    x = x + [encoding.eos_token_id] * pad

                 if len(y) < block_size:
-                    pad = len(y)-(block_size-1)
-                    y = y + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
+                    pad = len(y)-(block_size)
+                    y = y + [encoding.eos_token_id] * pad

                 batch_x.append(x)
                 batch_y.append(y)
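For reference, a minimal sketch of the chat-formatting path that `dataWorker` now takes when `chat` is set: each smoltalk-style message is rendered as `<|role|>content<|end|> ` and tokenized, with an EOS token appended per turn. The stub tokenizer below is purely illustrative; the training script uses the TinyLlama tokenizer via `AutoTokenizer`.

```python
# Illustrative sketch of the chat-formatting branch added to dataWorker.
# Assumes a smoltalk-style record: {"messages": [{"role": ..., "content": ...}, ...]}.
# StubTokenizer stands in for the real AutoTokenizer used in train.py.

def format_chat(record, encoding):
    tokens = []
    for msg in record["messages"]:
        # One turn becomes "<|role|>content<|end|> " followed by an EOS token.
        txt = f"<|{msg['role']}|>{msg['content']}<|end|> "
        tokens += encoding.encode(txt) + [encoding.eos_token_id]
    return tokens

class StubTokenizer:
    eos_token_id = 2
    def encode(self, text):
        return [ord(c) for c in text]  # placeholder: one "token" per character

example = {"messages": [{"role": "user", "content": "hi"},
                        {"role": "assistant", "content": "hello"}]}
print(format_chat(example, StubTokenizer()))
```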
model.py (23 lines changed)

@@ -1,12 +1,13 @@
 from tinygrad import Tensor,nn,TinyJit

 class MultiHeadAttention:
-    def __init__(self,embed_size,n_heads):
+    def __init__(self,embed_size,n_heads,lin):
         assert embed_size % n_heads == 0
         self.head_size = embed_size//n_heads
         self.n_heads = n_heads
         self.qkv = nn.Linear(embed_size, embed_size*3,bias=False)
         self.projection = nn.Linear(embed_size, embed_size,bias=False)
+        self.lin = lin
     def __call__(self,x):
         B,T,C=x.shape

@@ -15,10 +16,16 @@ class MultiHeadAttention:
         k = k.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         v = v.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         #B H T S
         #TODO attention free transformer

-        out = q.scaled_dot_product_attention(k,v,is_causal=True,dropout_p=0.01)
+        out = None
+        if self.lin:
+            q = q.sigmoid()
+            k = k.sigmoid()
+            out = ((q*k).exp()/(q*k)) * v
+        else:
+            out = q.scaled_dot_product_attention(k,v,is_causal=True)
         out = out.transpose(1,2).view(B,T,C)

         return self.projection(out)
     def cast(self,dtype):
         self.qkv.weight = self.qkv.weight.cast(dtype)

@@ -43,8 +50,8 @@ class FeedForwardNetwork:
         return self

 class Block:
-    def __init__(self,embed_size,n_heads):
-        self.mha = MultiHeadAttention(embed_size,n_heads)
+    def __init__(self,embed_size,n_heads,lin):
+        self.mha = MultiHeadAttention(embed_size,n_heads,lin)
         self.ffn = FeedForwardNetwork(embed_size)
         self.mhaNorm = nn.RMSNorm(embed_size)
         self.ffnNorm = nn.RMSNorm(embed_size)

@@ -58,12 +65,12 @@ class Block:
         return self

 class Transformer():
-    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,max_len):
+    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,block_size):
         self.tok_embed = nn.Embedding(vocab_size,embed_size)
         self.pos_embed = nn.Embedding(block_size,embed_size)
-        self.pos_idx = Tensor.arange(max_len, requires_grad=False)
+        self.pos_idx = Tensor.arange(block_size, requires_grad=False)

-        self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
+        self.blocks = [Block(embed_size,n_heads,i%4==0) for i in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
         self.output = nn.Linear(embed_size,vocab_size,bias=False)
     def __call__(self,x):
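A rough NumPy sketch of the element-wise branch that `MultiHeadAttention` takes when `lin` is true (every fourth block, per `i%4==0` in the Transformer constructor): q and k are squashed with sigmoid and combined per element, so no T×T attention matrix is formed. This is not the tinygrad implementation; it only mirrors the arithmetic, and the sizes are assumptions for illustration.

```python
# NumPy illustration of the element-wise "lin" path from the diff above.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

B, H, T, S = 2, 8, 16, 96                  # batch, heads, seq len, head_size (illustrative)
q = sigmoid(np.random.randn(B, H, T, S))   # sigmoid keeps q and k in (0, 1)
k = sigmoid(np.random.randn(B, H, T, S))
v = np.random.randn(B, H, T, S)

qk = q * k                                 # element-wise product, no T x T score matrix
out = (np.exp(qk) / qk) * v                # same (B, H, T, S) shape as v
print(out.shape)                           # (2, 8, 16, 96)
```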
optm.py (20 lines changed)

@@ -1,16 +1,18 @@
-from tinygrad import Tensor
+from tinygrad import Tensor,nn
 import math

 class CosineLR:
-    def __init__(self,optm,totalSteps,minlr):
+    def __init__(self,optm,totalSteps,maxlr,minlr):
         self.optm = optm
-        self.maxlr = optm.lr
+        self.maxlr = maxlr
         self.minlr = minlr
         self.totalSteps = totalSteps
         self.steps = 0

     def step(self):
-        self.optm.lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((step / self.totalSteps) * math.pi))
+        lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((self.steps / self.totalSteps) * math.pi))
+        for o in self.optm:
+            o.lr = lr
         self.optm.step()
         self.steps += 1

@@ -18,11 +20,11 @@ class CosineLR:
         self.optm.zero_grad()


-def llmOptimizer(params,steps,minlr):
+def llmOptimizer(params,steps,maxlr,minlr):
     muon_params = [p for p in params if len(p.shape) >= 2]
     adamw_params = [p for p in params if len(p.shape) < 2]

-    o1 = nn.optim.Muon(muon_params, lr=hypr["starting_lr"])
-    o2 = nn.optim.AdamW(adamw_params, lr=hypr["starting_lr"])
-    optimizer = nn.optim.OptimizerGroup([o1,o2])
-    return CosineLR(optimizer,steps,minlr)
+    o1 = nn.optim.Muon(muon_params, lr=maxlr)
+    o2 = nn.optim.AdamW(adamw_params, lr=maxlr)
+    optimizer = nn.optim.OptimizerGroup(o1,o2)
+    return CosineLR(optimizer,steps,maxlr,minlr)
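The schedule itself is a plain cosine decay from `maxlr` to `minlr` over `totalSteps`. Below is a stand-alone copy of the formula used in `CosineLR.step()`, evaluated with the `starting_lr`, `minimum_lr`, and `steps` values from train.py, purely to sanity-check the curve without constructing any optimizers.

```python
# Stand-alone version of the cosine decay used by CosineLR.step().
import math

def cosine_lr(step, total_steps, maxlr, minlr):
    return minlr + 0.5 * (maxlr - minlr) * (1 + math.cos((step / total_steps) * math.pi))

# With train.py's defaults (starting_lr=6e-4, minimum_lr=6e-5, steps=535_000):
for s in (0, 267_500, 535_000):
    print(s, cosine_lr(s, 535_000, 6e-4, 6e-5))   # 6e-4 at the start, 6e-5 at the end
```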
train.py (new file, 96 lines)

@@ -0,0 +1,96 @@
+from tinygrad.nn.state import get_state_dict,safe_load, load_state_dict
+from concurrent.futures import ThreadPoolExecutor
+from tinygrad import Tensor,TinyJit,Device,nn
+from transformers import AutoTokenizer
+from datasets import load_dataset
+from model import Transformer
+from tqdm import tqdm
+import optm
+import data
+import log
+import sys
+
+hypr = {
+    "embed_size": 768,
+    "n_heads": 8,
+    "n_blocks": 12,
+    "block_size": 512,
+    "batch_size": 8,
+    "starting_lr": 6e-4,
+    "minimum_lr": 6e-5,
+    "warmup": 5_000,
+    "steps": 535_000,
+    "encoding": "TinyLlama/TinyLlama_v1.1",
+    "dataset": "HuggingFaceTB/smollm-corpus",
+    "subset": "cosmopedia-v2",
+    "chat_dataset": "HuggingFaceTB/smoltalk",
+    "chat_subset": "all",
+    "half": True,
+}
+
+print(Device.DEFAULT)
+chat = len(sys.argv) > 1
+if(chat):
+    hypr["dataset"] = hypr["chat_dataset"]
+    hypr["subset"] = hypr["chat_subset"]
+    hypr["starting_lr"] *= 0.1
+    hypr["minimum_lr"] *= 0.1
+
+#for loging
+loger = ThreadPoolExecutor(max_workers=2)
+
+dataset = load_dataset(hypr["dataset"],
+                       hypr["subset"],
+                       split="train",
+                       streaming=True)
+encoding = AutoTokenizer.from_pretrained(hypr["encoding"])
+if encoding.pad_token_id == None:
+    encoding.pad_token_id=encoding.eos_token_id
+hypr["vocab_size"] = encoding.vocab_size
+batch = data.startDataWorker(dataset,encoding,hypr["batch_size"],hypr["block_size"],chat)
+
+model = Transformer(hypr["vocab_size"],hypr["embed_size"],hypr["n_heads"],hypr["n_blocks"],hypr["block_size"])
+if (chat):
+    load_state_dict(model,safe_load(sys.argv[1]))
+if hypr["half"]:
+    from tinygrad import dtypes
+    model = model.cast(dtypes.float16)
+params = nn.state.get_parameters(model)
+optimizer = optm.llmOptimizer(params,hypr["steps"],hypr["starting_lr"],hypr["minimum_lr"])
+
+@TinyJit
+def step(x,y):
+    optimizer.zero_grad()
+
+    logits = model(x)
+    B,T,C = logits.shape
+    logits = logits.view(B*T,C)
+    y = y.view(B*T)
+    loss = logits.cross_entropy(y)
+
+    loss.backward()
+    optimizer.step()
+    return loss
+
+Tensor.training=True
+bar = tqdm(range(hypr["steps"]))
+
+for steps in bar:
+    nx, ny = next(batch)
+    x = Tensor(nx, device=Device.DEFAULT).realize()
+    y = Tensor(ny, device=Device.DEFAULT).realize()
+    loss = step(x, y)
+    if steps % 10 == 0:
+        l = loss.numpy()
+        loger.submit(log.logLoss, steps, l)
+        bar.set_postfix(loss= f"{l:.4f}")
+    if steps % 500 == 0:
+        loss.realize()
+        m = get_state_dict(model)
+        log.logModel(steps,m)
+        #TODO non sycronus safetensor loging
+        #loger.submit(log.logModel,steps,m)
+
+m = get_state_dict(model)
+log.logModel("final",m)
+loger.shutdown(wait=True)
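Inside `step()`, logits of shape (B, T, C) and targets of shape (B, T) are flattened before `cross_entropy`, so every token position becomes one row of an ordinary classification problem. A small NumPy shape check, using the batch and block sizes from `hypr`; the vocabulary size here is an assumption, since the real value comes from the tokenizer at runtime.

```python
# Shape sketch of the loss reshape in train.py's step(); values are illustrative.
import numpy as np

B, T, C = 8, 512, 32_000                 # batch_size, block_size, assumed vocab size
logits = np.zeros((B, T, C), dtype=np.float32)
y = np.zeros((B, T), dtype=np.int64)

flat_logits = logits.reshape(B * T, C)   # (4096, 32000): one row per token position
flat_y = y.reshape(B * T)                # (4096,): one target id per token position
print(flat_logits.shape, flat_y.shape)
```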