Implemented MultiHeadAttention
model.py
@@ -1,12 +1,28 @@
 from tinygrad import Tensor,nn,TinyJit
 
 class MultiHeadAttention:
-    def __init__(self):
-        pass #TODO
-    def __call__(self):
-        pass #TODO
-    def cast(self):
-        pass #TODO
+    def __init__(self,embed_size,n_heads):
+        assert embed_size % n_heads == 0
+        self.head_size = embed_size//n_heads
+        self.n_heads = n_heads
+        self.qkv = nn.Linear(embed_size, embed_size*3,bias=False)
+        self.projection = nn.Linear(embed_size, embed_size,bias=False)
+    def __call__(self,x):
+        B,T,C = x.shape
+
+        q,k,v = self.qkv(x).chunk(3,dim=-1)
+        q = q.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
+        k = k.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
+        v = v.view(B, T, self.n_heads, self.head_size).transpose(1, 2)
+        #B H T S
+
+        out = q.scaled_dot_product_attention(k,v,is_causal=True,dropout_p=0.01)
+        out = out.transpose(1,2).view(B,T,C)
+        return self.projection(out)
+    def cast(self,dtype):
+        self.qkv.weight = self.qkv.weight.cast(dtype)
+        self.projection.weight = self.projection.weight.cast(dtype)
+        return self
 
 
 class FeedForwardNetwork:
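
A minimal usage sketch of the new class (illustrative only, not part of the commit; the values 64, 4, and the "from model import ..." path are assumptions):

    from tinygrad import Tensor, dtypes
    from model import MultiHeadAttention  # assumes the class stays in model.py

    mha = MultiHeadAttention(embed_size=64, n_heads=4)  # embed_size must be divisible by n_heads
    x = Tensor.randn(2, 16, 64)   # (batch, sequence length, embed_size)
    out = mha(x)                  # causal self-attention; output shape matches the input
    print(out.shape)              # (2, 16, 64)

    mha_half = mha.cast(dtypes.half)  # cast() replaces both weight tensors in place and returns self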