From 4196b706818bae2aa636570877720c098f028835 Mon Sep 17 00:00:00 2001
From: k
Date: Tue, 3 Mar 2026 21:52:30 -0500
Subject: [PATCH] Basic version working.

---
 bot.py   | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
 model.py | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 172 insertions(+), 4 deletions(-)
 create mode 100644 model.py

diff --git a/bot.py b/bot.py
index 70000db..c5128b9 100755
--- a/bot.py
+++ b/bot.py
@@ -8,6 +8,8 @@ from tinygrad.nn.state import safe_load, load_state_dict
 from transformers import AutoTokenizer
 from model import Transformer
 from tqdm import tqdm
+import threading
+
 hypr = {
     "embed_size": 768, "n_heads": 8,
     "n_blocks": 12, "block_size": 512,
@@ -26,14 +28,67 @@ Tensor.training = False
 
 @TinyJit
 def run_model(input_buffer):
+    """Run the model on the device under TinyJit."""
     return model(input_buffer)
 
 def inference_worker():
-    """ Runs in a separate thread to handle the heavy lifting. """
-    pass
+    """Consume requests from msg_q and run them through the model in
+    fixed-size batches. Each request is a (reply_queue, token_list) pair."""
+    batch_size = 2
+    # each slot holds None or a (reply_queue, tokens, length) tuple
+    slots = [None] * batch_size
+
+    while True:
+        # block for work when fully idle; otherwise fill one free slot
+        # if a request is waiting
+        if (not msg_q.empty() and None in slots) or slots.count(None) == len(slots):
+            i = slots.index(None)
+            out, inp = msg_q.get()
+            slots[i] = (out, inp, None)
+
+        batch = []
+        for i in range(batch_size):
+            if slots[i] is None:
+                # dummy int row keeps the batch shape and dtype constant for the JIT
+                t = Tensor([0] * hypr['block_size'])
+            else:
+                out, t, _ = slots[i]
+                if not isinstance(t, Tensor):
+                    # fresh request: token list -> Tensor, record prompt length
+                    t = Tensor(t)
+                    slots[i] = (out, t, t.shape[0])
+                # TODO: crop prompts longer than block_size
+                t = t.pad((0, hypr['block_size'] - t.shape[0]))
+            batch.append(t)
+
+        chat_tensor = batch[0].stack(*batch[1:])
+        logits = run_model(chat_tensor)
+
+        for i in range(batch_size):
+            if slots[i] is None:
+                continue
+            out, t, length = slots[i]
+            if length < 15:  # crude cap: stop once the sequence reaches 15 tokens
+                # sample the next token at the last real position, temperature 0.7
+                tok = (logits[i, length - 1, :] / 0.7).softmax().multinomial(1)
+                out.put(tok.numpy()[0])
+                slots[i] = (out, t.cat(tok), length + 1)
+            else:
+                print(encoding.decode(chat_tensor[i].numpy().astype(int))[:25])
+                out.shutdown()  # queue.Queue.shutdown() needs Python >= 3.13
+                slots[i] = None
 
 def warmup(count):
+    """Run the model count times to warm up the JIT."""
     import random
     tokens = encoding.encode("")
     tokens = Tensor([tokens])
@@ -45,15 +100,41 @@ def warmup(count):
         tokens = tokens.cat(token_tensor, dim=1).realize()
         tokens = tokens[:, -hypr['block_size']:]
 
+def test(msg):
+    """Generator: queue a prompt, stream token ids back, then the full text."""
+    reply_q = queue.Queue()
+    inp = encoding.encode(msg)
+    toks = []
+
+    msg_q.put((reply_q, inp))
+    yield "Start:"
+    while True:
+        try:
+            i = reply_q.get()
+            toks.append(i)
+            yield f"{i},"
+        except queue.ShutDown:  # worker closed the queue; generation is done
+            break
+    yield f"\n{encoding.decode(toks)}"
+
+app = flask.Flask(__name__)
+
+@app.route('/', methods=['POST'])
+def complete():
+    user_string = flask.request.form.get('input', 'Default prompt')
+    # Flask streams the generator to the client as it yields
+    return test(user_string), {"Content-Type": "text/plain"}
 
 def apiStart():
-    pass
+    """Start the Flask API (blocking; runs in the daemon thread)."""
+    app.run()
 
 if __name__ == "__main__":
     print(Device.DEFAULT)
     print("warming up")
-    warmup(200)
+    #warmup(200)
+    t = threading.Thread(target=apiStart, daemon=True)
+    t.start()
+    # inference runs on the main thread; Flask serves from the daemon thread
+    inference_worker()
diff --git a/model.py b/model.py
new file mode 100644
index 0000000..e484430
--- /dev/null
+++ b/model.py
@@ -0,0 +1,87 @@
+from tinygrad import Tensor, nn
+
+class MultiHeadAttention:
+    def __init__(self, embed_size, n_heads, lin):
+        assert embed_size % n_heads == 0
+        self.head_size = embed_size // n_heads
+        self.n_heads = n_heads
+        self.qkv = nn.Linear(embed_size, embed_size * 3, bias=False)
+        self.projection = nn.Linear(embed_size, embed_size, bias=False)
+        self.lin = lin  # True: elementwise gated variant, False: causal SDPA
+
+    def __call__(self, x):
+        B, T, C = x.shape
+        q, k, v = self.qkv(x).chunk(3, dim=-1)
+        # reshape to (B, n_heads, T, head_size)
+        q = q.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
+        k = k.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
+        v = v.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
+
+        if self.lin:
+            # experimental per-position gate: exp(q*k)/(q*k) scales v elementwise
+            # (no mixing across time, so it is causal by construction)
+            q = q.sigmoid()
+            k = k.sigmoid()
+            out = ((q * k).exp() / (q * k)) * v
+        else:
+            out = q.scaled_dot_product_attention(k, v, is_causal=True)
+        out = out.transpose(1, 2).view(B, T, C)
+        return self.projection(out)
+
+    def cast(self, dtype):
+        self.qkv.weight = self.qkv.weight.cast(dtype)
+        self.projection.weight = self.projection.weight.cast(dtype)
+        return self
+
+class FeedForwardNetwork:
+    def __init__(self, embed_size, ratio=8/3):
+        hidden_size = int(embed_size * ratio)
+        self.norm = nn.RMSNorm(embed_size)
+        self.gate = nn.Linear(embed_size, hidden_size, bias=False)
+        self.up = nn.Linear(embed_size, hidden_size, bias=False)
+        self.down = nn.Linear(hidden_size, embed_size, bias=False)
+
+    def __call__(self, x):
+        # SwiGLU: down(silu(gate(x)) * up(x)); note the input is also
+        # normalized by Block.ffnNorm, so it is RMS-normed twice
+        x = self.norm(x)
+        return self.down(self.gate(x).silu() * self.up(x)).dropout(0.01)
+
+    def cast(self, dtype):
+        self.gate.weight = self.gate.weight.cast(dtype)
+        self.up.weight = self.up.weight.cast(dtype)
+        self.down.weight = self.down.weight.cast(dtype)
+        return self
+
+class Block:
+    def __init__(self, embed_size, n_heads, lin):
+        self.mha = MultiHeadAttention(embed_size, n_heads, lin)
+        self.ffn = FeedForwardNetwork(embed_size)
+        self.mhaNorm = nn.RMSNorm(embed_size)
+        self.ffnNorm = nn.RMSNorm(embed_size)
+
+    def __call__(self, x):
+        # pre-norm residual blocks
+        x = x + self.mha(self.mhaNorm(x))
+        x = x + self.ffn(self.ffnNorm(x))
+        return x
+
+    def cast(self, dtype):
+        self.mha = self.mha.cast(dtype)
+        self.ffn = self.ffn.cast(dtype)
+        return self
+
+class Transformer:
+    def __init__(self, vocab_size, embed_size, n_heads, n_blocks, block_size):
+        self.tok_embed = nn.Embedding(vocab_size, embed_size)
+        self.pos_embed = nn.Embedding(block_size, embed_size)
+        # NOTE: .sin() turns these into float "indices"; nn.Embedding one-hot
+        # matches integers, so every position except 0 looks up a zero vector
+        self.pos_idx = Tensor.arange(block_size, requires_grad=False).sin()
+
+        # every 4th block uses the elementwise attention variant
+        self.blocks = [Block(embed_size, n_heads, i % 4 == 0) for i in range(n_blocks)]
+        self.norm = nn.RMSNorm(embed_size)
+        self.output = nn.Linear(embed_size, vocab_size, bias=False)
+
+    def __call__(self, x):
+        B, T = x.shape
+        pos_embeds = self.pos_embed(self.pos_idx[:T])
+        x = self.tok_embed(x) + pos_embeds
+        x = x.sequential(self.blocks)
+        x = self.norm(x)
+        return self.output(x)
+
+    def cast(self, dtype):
+        self.tok_embed.weight = self.tok_embed.weight.cast(dtype)
+        self.blocks = [b.cast(dtype) for b in self.blocks]
+        self.output.weight = self.output.weight.cast(dtype)
+        return self
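
Testing notes:

A quick way to exercise the new endpoint once the server is running via
python bot.py. This is a minimal sketch, assuming Flask's default bind of
127.0.0.1:5000 (from the bare app.run()) and the third-party requests
package, which is not part of this patch:

    # hypothetical smoke test for the streaming POST / endpoint
    import requests

    resp = requests.post(
        "http://127.0.0.1:5000/",
        data={"input": "Once upon a time"},
        stream=True,  # consume chunks as test() yields them
    )
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)

Per test(), the response begins with "Start:", streams the sampled token ids
separated by commas, then finishes with the decoded text on its own line.

model.py can also be sanity-checked in isolation. The sketch below takes the
sizes from bot.py's hypr dict; the vocab_size of 50257 is an assumption (the
GPT-2 vocabulary), since the real value comes from the tokenizer configured
in bot.py:

    from tinygrad import Tensor
    from model import Transformer

    net = Transformer(vocab_size=50257, embed_size=768, n_heads=8,
                      n_blocks=12, block_size=512)  # sizes from hypr
    x = Tensor([[1, 2, 3, 4]])  # (B=1, T=4) batch of token ids
    print(net(x).shape)         # expected: (1, 4, 50257)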