Fix errors

data.py (5 changed lines)
@@ -4,18 +4,19 @@ import queue
 def startDataWorker(dataset,encoding,batch_size,block_size):
     data_q = queue.Queue(maxsize=100)
-    t = threading.Thread(target=data_worker, args=(data_q, dataset, encoding, batch_size, block_size), daemon=True)
+    t = threading.Thread(target=dataWorker, args=(data_q, dataset, encoding, batch_size, block_size), daemon=True)
     t.start()
     while (1):
         try:
             bx, by = data_q.get(timeout=30)
         except queue.Empty:
+            print("queue empty ...")
             continue
         yield (bx,by)
 
 def dataWorker(q, dataset, encoding, batch_size, block_size):
     batch_x, batch_y = [], []
-    while(1):
+    while True:
         for text in dataset["text"]:
             tokens = encoding.encode(text)
             for i in range(0, len(tokens)-block_size-1,block_size):
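
The hunk cuts off inside dataWorker's inner loop, so for reference here is a self-contained sketch of the producer/consumer pattern data.py implements after this fix. The batch assembly and q.put at the end of dataWorker are assumptions (the diff does not show them), and StubEncoding is a stand-in tokenizer so the snippet runs on its own.

import queue, threading

class StubEncoding:
    def encode(self, text):
        return [ord(c) for c in text]  # toy "tokenizer": one id per character

def dataWorker(q, dataset, encoding, batch_size, block_size):
    batch_x, batch_y = [], []
    while True:
        for text in dataset["text"]:
            tokens = encoding.encode(text)
            # Slide over the token stream in block_size steps; y is x shifted by one.
            for i in range(0, len(tokens) - block_size - 1, block_size):
                batch_x.append(tokens[i : i + block_size])
                batch_y.append(tokens[i + 1 : i + block_size + 1])
                if len(batch_x) == batch_size:
                    q.put((batch_x, batch_y))  # blocks once the queue is full
                    batch_x, batch_y = [], []

def startDataWorker(dataset, encoding, batch_size, block_size):
    data_q = queue.Queue(maxsize=100)
    t = threading.Thread(target=dataWorker,
                         args=(data_q, dataset, encoding, batch_size, block_size),
                         daemon=True)
    t.start()
    while True:
        try:
            bx, by = data_q.get(timeout=30)
        except queue.Empty:
            print("queue empty ...")
            continue
        yield (bx, by)

gen = startDataWorker({"text": ["hello world, this is a tiny corpus"]},
                      StubEncoding(), batch_size=2, block_size=4)
bx, by = next(gen)
print(len(bx), len(bx[0]))  # -> 2 4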

model.py (4 changed lines)
@@ -58,10 +58,10 @@ class Block:
         return self
 
 class Transformer():
-    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,max_len):
+    def __init__(self,vocab_size,embed_size,n_heads,n_blocks,block_size):
         self.tok_embed = nn.Embedding(vocab_size,embed_size)
         self.pos_embed = nn.Embedding(block_size,embed_size)
-        self.pos_idx = Tensor.arange(max_len, requires_grad=False)
+        self.pos_idx = Tensor.arange(block_size, requires_grad=False)
 
         self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
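
The rename is the real fix here: the old signature took max_len while the constructor body already referenced block_size in pos_embed, so constructing a Transformer raised a NameError, and pos_idx was sized off a different name than the positional table. A minimal stub (not model.py's actual forward, which the hunk does not show) illustrating how the two embeddings are assumed to combine:

from tinygrad import Tensor, nn

class TinyTransformerStub:
    def __init__(self, vocab_size, embed_size, block_size):
        self.tok_embed = nn.Embedding(vocab_size, embed_size)
        self.pos_embed = nn.Embedding(block_size, embed_size)  # one row per position
        self.pos_idx = Tensor.arange(block_size, requires_grad=False)

    def __call__(self, idx):
        T = idx.shape[-1]
        # token embedding plus a learned embedding of each position 0..T-1
        return self.tok_embed(idx) + self.pos_embed(self.pos_idx[:T])

m = TinyTransformerStub(vocab_size=64, embed_size=8, block_size=16)
x = Tensor([[1, 2, 3, 4]])  # (batch=1, T=4) token ids
print(m(x).shape)           # -> (1, 4, 8)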

optm.py (20 changed lines)
@@ -1,16 +1,18 @@
-from tinygrad import Tensor
+from tinygrad import Tensor,nn
 import math
 
 class CosineLR:
-    def __init__(self,optm,totalSteps,minlr):
+    def __init__(self,optm,totalSteps,maxlr,minlr):
         self.optm = optm
-        self.maxlr = optm.lr
+        self.maxlr = maxlr
         self.minlr = minlr
         self.totalSteps = totalSteps
         self.steps = 0
 
     def step(self):
-        self.optm.lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((step / self.totalSteps) * math.pi))
+        lr = self.minlr + 0.5 * (self.maxlr - self.minlr) * (1 + math.cos((self.steps / self.totalSteps) * math.pi))
+        for o in self.optm:
+            o.lr = lr
         self.optm.step()
         self.steps += 1
 
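
Two fixes in one line: the old expression read an undefined name (step instead of self.steps, a NameError on the first call), and it assigned lr to the OptimizerGroup object rather than to the member optimizers, which each keep their own lr; the new loop writes the value into every member. The schedule math itself can be sanity-checked standalone:

import math

def cosine_lr(step, total_steps, maxlr, minlr):
    # Half-cosine decay: starts at maxlr, ends at minlr after total_steps.
    return minlr + 0.5 * (maxlr - minlr) * (1 + math.cos((step / total_steps) * math.pi))

for s in (0, 500, 1000):
    print(s, round(cosine_lr(s, 1000, 3e-4, 3e-5), 6))
# prints: 0 0.0003 (maxlr), 500 0.000165 (midpoint), 1000 3e-05 (minlr)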
@@ -18,11 +20,11 @@ class CosineLR:
         self.optm.zero_grad()
 
 
-def llmOptimizer(params,steps,minlr):
+def llmOptimizer(params,steps,maxlr,minlr):
     muon_params = [p for p in params if len(p.shape) >= 2]
     adamw_params = [p for p in params if len(p.shape) < 2]
 
-    o1 = nn.optim.Muon(muon_params, lr=hypr["starting_lr"])
-    o2 = nn.optim.AdamW(adamw_params, lr=hypr["starting_lr"])
-    optimizer = nn.optim.OptimizerGroup([o1,o2])
-    return CosineLR(optimizer,steps,minlr)
+    o1 = nn.optim.Muon(muon_params, lr=maxlr)
+    o2 = nn.optim.AdamW(adamw_params, lr=maxlr)
+    optimizer = nn.optim.OptimizerGroup(o1,o2)
+    return CosineLR(optimizer,steps,maxlr,minlr)
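
A hedged sketch of the intended call pattern after this change. It assumes a tinygrad build that ships nn.optim.Muon (the diff relies on it), uses a single Linear layer as a stand-in model so both parameter groups are exercised, and assumes the method wrapping self.optm.zero_grad() in the unshown part of the file is named zero_grad:

from tinygrad import Tensor, nn
from tinygrad.nn.state import get_parameters
from optm import llmOptimizer  # the file patched above

# Stand-in model: one Linear layer gives llmOptimizer both a 2-D weight
# (routed to Muon) and a 1-D bias (routed to AdamW).
model = nn.Linear(4, 4)
sched = llmOptimizer(get_parameters(model), steps=100, maxlr=3e-4, minlr=3e-5)

with Tensor.train():  # tinygrad optimizers require training mode to step
    for _ in range(3):
        loss = model(Tensor.randn(8, 4)).square().mean()
        loss.backward()
        sched.step()       # writes the cosine lr into Muon and AdamW, then steps both
        sched.zero_grad()  # assumed name of the wrapper around self.optm.zero_grad()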