Compare commits
7 commits: 496916f428...main
| Author | SHA1 | Date |
|---|---|---|
| | 98282675b0 | |
| | 8d8fb8c212 | |
| | 89c9d01cb8 | |
| | dc231ae703 | |
| | a0cd98876c | |
| | 0537a5df64 | |
| | c78a31362a | |
.gitignore (vendored, new file, +2)
@@ -0,0 +1,2 @@
+*.safetensors
+*.csv
data.py (22 lines changed)
@@ -18,26 +18,26 @@ def dataWorker(q, dataset, encoding, batch_size, block_size,chat):
     batch_x, batch_y = [], []
     while True:
         for text in dataset:
-            tokens = None
+            tokens = []
             if(chat):
-                txt = f"<|user|>{text['instruction']}"
-                if(text["input"] != None):
-                    txt += f"\n{text['input']}"
-                txt = txt + f"<|end|>\n<|assistant|>{text['output']}<|end|>"
-                tokens = [encoding.bos_token_id]+encoding.encode(txt)
+                for msg in text['messages']:
+                    role = msg['role']
+                    content = msg['content']
+                    txt = f"<|{role}|>{content}<|end|> "
+                    tokens += encoding.encode(txt) + [encoding.eos_token_id]
             else:
-                tokens = [encoding.bos_token_id]+encoding.encode(text["text"])
+                tokens = encoding.encode(text["text"])
             for i in range(0, len(tokens)-block_size+1,block_size):
                 x = tokens[i:i+block_size]
                 y = tokens[i+1:i+block_size+1]

                 if len(x) < block_size:
-                    pad = len(x)-(block_size-1)
-                    x = x + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
+                    pad = len(x)-(block_size)
+                    x = x + [encoding.eos_token_id] * pad

                 if len(y) < block_size:
-                    pad = len(y)-(block_size-1)
-                    y = y + [encoding.eos_token_id] + [encoding.pad_token_id] * pad
+                    pad = len(y)-(block_size)
+                    y = y + [encoding.eos_token_id] * pad

                 batch_x.append(x)
                 batch_y.append(y)
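For orientation, here is a minimal, self-contained sketch of what the new chat branch does to a smoltalk-style record: each message is wrapped as `<|role|>content<|end|> `, encoded, and terminated with the tokenizer's EOS id. The `AutoTokenizer` call and the sample record are illustrative assumptions; the repository's own tokenizer-loading code is not part of this diff.

```python
# Illustrative only: mirrors the new chat branch in dataWorker.
# Assumptions: `encoding` is a Hugging Face tokenizer (matching the
# TinyLlama setting in train.py) and `sample` is a made-up record in
# the smoltalk "messages" layout.
from transformers import AutoTokenizer

encoding = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama_v1.1")

sample = {
    "messages": [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "4"},
    ]
}

tokens = []
for msg in sample["messages"]:
    role = msg["role"]
    content = msg["content"]
    txt = f"<|{role}|>{content}<|end|> "
    # Each message is encoded separately and terminated with EOS,
    # as in the updated dataWorker loop.
    tokens += encoding.encode(txt) + [encoding.eos_token_id]

print(len(tokens), tokens[:10])
```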
model.py (19 lines changed)
@@ -1,12 +1,13 @@
 from tinygrad import Tensor,nn,TinyJit

 class MultiHeadAttention:
-    def __init__(self,embed_size,n_heads):
+    def __init__(self,embed_size,n_heads,lin):
         assert embed_size % n_heads == 0
         self.head_size = embed_size//n_heads
         self.n_heads = n_heads
         self.qkv = nn.Linear(embed_size, embed_size*3,bias=False)
         self.projection = nn.Linear(embed_size, embed_size,bias=False)
+        self.lin = lin
     def __call__(self,x):
         B,T,C=x.shape

@@ -15,10 +16,16 @@ class MultiHeadAttention:
         k = k.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         v = v.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
         #B H T S
-        #TODO attention free transformer

-        out = q.scaled_dot_product_attention(k,v,is_causal=True,dropout_p=0.01)
+        out = None
+        if self.lin:
+            q = q.sigmoid()
+            k = k.sigmoid()
+            out = ((q*k).exp()/(q*k)) * v
+        else:
+            out = q.scaled_dot_product_attention(k,v,is_causal=True)
         out = out.transpose(1,2).view(B,T,C)

         return self.projection(out)
     def cast(self,dtype):
         self.qkv.weight = self.qkv.weight.cast(dtype)
@@ -43,8 +50,8 @@ class FeedForwardNetwork:
         return self

 class Block:
-    def __init__(self,embed_size,n_heads):
-        self.mha = MultiHeadAttention(embed_size,n_heads)
+    def __init__(self,embed_size,n_heads,lin):
+        self.mha = MultiHeadAttention(embed_size,n_heads,lin)
         self.ffn = FeedForwardNetwork(embed_size)
         self.mhaNorm = nn.RMSNorm(embed_size)
         self.ffnNorm = nn.RMSNorm(embed_size)
@@ -63,7 +70,7 @@ class Transformer():
         self.pos_embed = nn.Embedding(block_size,embed_size)
         self.pos_idx = Tensor.arange(block_size, requires_grad=False)

-        self.blocks = [Block(embed_size,n_heads) for _ in range(n_blocks)]
+        self.blocks = [Block(embed_size,n_heads,i%4==0) for i in range(n_blocks)]
         self.norm = nn.RMSNorm(embed_size)
         self.output = nn.Linear(embed_size,vocab_size,bias=False)
     def __call__(self,x):
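The interesting part of this diff is the new `lin` path in `MultiHeadAttention` and the `i%4==0` schedule that enables it on every fourth block. Below is a standalone sketch of the two branches using plain tinygrad tensors; the shapes are made up for illustration, and the elementwise `lin` formula is reproduced exactly as written in the diff rather than taken from any reference attention variant.

```python
# Illustrative only: the two attention paths after this change.
# Shapes are arbitrary; (B, H, T, S) follows the comment in the diff.
from tinygrad import Tensor

B, H, T, S = 2, 8, 16, 64
q = Tensor.randn(B, H, T, S)
k = Tensor.randn(B, H, T, S)
v = Tensor.randn(B, H, T, S)

lin = True  # in the diff, blocks with index i % 4 == 0 get lin=True
if lin:
    # Sigmoid-gated elementwise variant, copied verbatim from the new code.
    q = q.sigmoid()
    k = k.sigmoid()
    out = ((q * k).exp() / (q * k)) * v
else:
    # Standard causal attention; note the dropout_p=0.01 argument was dropped.
    out = q.scaled_dot_product_attention(k, v, is_causal=True)

print(out.shape)  # (2, 8, 16, 64)
```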
train.py (24 lines changed)
@@ -11,20 +11,21 @@ import log
 import sys

 hypr = {
-    "embed_size": 512,
+    "embed_size": 768,
     "n_heads": 8,
-    "n_blocks": 6,
+    "n_blocks": 12,
-    "block_size": 256,
+    "block_size": 512,
-    "batch_size": 16,
+    "batch_size": 8,
     "starting_lr": 6e-4,
     "minimum_lr": 6e-5,
-    "warmup": 1_000,
+    "warmup": 5_000,
-    "steps": 20_000,
+    "steps": 535_000,
-    "encoding": "gpt2",
+    "encoding": "TinyLlama/TinyLlama_v1.1",
     "dataset": "HuggingFaceTB/smollm-corpus",
     "subset": "cosmopedia-v2",
-    "chat_dataset": "yahma/alpaca-cleaned",
+    "chat_dataset": "HuggingFaceTB/smoltalk",
-    "chat_subset": None,
+    "chat_subset": "all",
+    "half": True,
 }

 print(Device.DEFAULT)
@@ -32,6 +33,8 @@ chat = len(sys.argv) > 1
 if(chat):
     hypr["dataset"] = hypr["chat_dataset"]
     hypr["subset"] = hypr["chat_subset"]
+    hypr["starting_lr"] *= 0.1
+    hypr["minimum_lr"] *= 0.1

 #for loging
 loger = ThreadPoolExecutor(max_workers=2)
@@ -49,6 +52,9 @@ batch = data.startDataWorker(dataset,encoding,hypr["batch_size"],hypr["block_siz
 model = Transformer(hypr["vocab_size"],hypr["embed_size"],hypr["n_heads"],hypr["n_blocks"],hypr["block_size"])
 if (chat):
     load_state_dict(model,safe_load(sys.argv[1]))
+if hypr["half"]:
+    from tinygrad import dtypes
+    model = model.cast(dtypes.float16)
 params = nn.state.get_parameters(model)
 optimizer = optm.llmOptimizer(params,hypr["steps"],hypr["starting_lr"],hypr["minimum_lr"])
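The config now points `encoding` at `TinyLlama/TinyLlama_v1.1` instead of `gpt2`, and `data.py` pads with `eos_token_id`, so whatever tokenizer this resolves to must expose that id. A hedged sketch of how the setting could be turned into a tokenizer and a `vocab_size`; the diff does not show the project's actual loading code, so the `AutoTokenizer` call and the `len(encoding)` lookup here are assumptions:

```python
# Illustrative only: one way the new "encoding" value could be resolved
# into a tokenizer and a vocab size. The project's real loading code is
# outside this diff, so AutoTokenizer and len(encoding) are assumptions.
from transformers import AutoTokenizer

hypr = {
    "encoding": "TinyLlama/TinyLlama_v1.1",
    "embed_size": 768,
    "n_blocks": 12,
    "block_size": 512,
    "half": True,
}

encoding = AutoTokenizer.from_pretrained(hypr["encoding"])
hypr["vocab_size"] = len(encoding)  # Transformer(...) in train.py reads hypr["vocab_size"]

# data.py now pads with eos_token_id, so the tokenizer must define it.
print(hypr["vocab_size"], encoding.eos_token_id)
```

Under these assumptions the same object would satisfy the `encoding.encode` and `encoding.eos_token_id` calls made in `data.py`.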