import time

import mlflow
import numpy as np
from tinygrad import Device, Tensor, TinyJit, nn

import show
from model import gen

BATCH_SIZE = 16
EPOCHS = 100
LEARNING_RATE = 1e-5

print(Device.DEFAULT)

mdl = gen()
opt = nn.optim.AdamW(nn.state.get_parameters(mdl), lr=LEARNING_RATE)

# Amplitude of the injected noise; doubled whenever the loss plateaus.
volume = 0.1


def spec_loss(pred, target, eps=1e-6):
    # Spectral convergence: relative Frobenius norm of the residual.
    sc = (target - pred).square().sum().sqrt() / (target.square().sum().sqrt() + eps)
    # Log-magnitude difference: mean absolute error in log space.
    log_mag = ((target.abs() + eps).log() - (pred.abs() + eps).log()).abs().mean()
    return sc + log_mag


@TinyJit
def step_gen(x: Tensor, vol: Tensor) -> Tensor:
    # Denoising step: corrupt the clean batch with bounded noise, then train
    # the model to reconstruct the clean input. `vol` is passed in as a Tensor
    # so TinyJit does not bake the initial noise volume into the captured
    # kernels when the global is later doubled.
    noise = Tensor.rand_like(x).tanh()
    y = (x + noise * vol).clamp(0, 1)
    loss = spec_loss(mdl(y), x)
    opt.zero_grad()
    loss.backward()
    opt.step()
    # Return the realized loss Tensor; the caller reads it out with .item().
    return loss.realize()


print("loading")
x = np.load("data.npz")["arr_0"]
# x = x[0:64]  # uncomment to train on a small subset

run_name = f"tinygrad_autoencoder_{int(time.time())}"
mlflow.set_tracking_uri("http://127.0.0.1:5000")
mlflow.start_run(run_name=run_name)
mlflow.log_params({
    "batch_size": BATCH_SIZE,
    "epochs": EPOCHS,
    "lr": LEARNING_RATE,
    "data_size": len(x),
})

# Log the clean reference spectrogram once for comparison.
show.logSpec(x[0][0], "default")

print("training")
Tensor.training = True
prev_loss = 0.0
expected_shape = (BATCH_SIZE, 1, 128, 431)
for epoch in range(EPOCHS):
    print(f"\n--- Starting Epoch {epoch} ---\n")
    loss, batches = 0.0, 0
    for i in range(0, len(x), BATCH_SIZE):
        tx = Tensor(x[i:i + BATCH_SIZE])
        # TinyJit requires a fixed input shape, so skip the final partial batch.
        if tx.shape != expected_shape:
            continue
        loss += step_gen(tx, Tensor([volume])).item()
        batches += 1
    loss /= batches  # average over the batches actually trained on

    if epoch % 5 == 0:
        # Periodically log the model's reconstruction of a noised sample.
        noise = Tensor.rand_like(Tensor(x[0:1])).tanh()
        y = Tensor(x[0:1]) + noise * volume
        show.logSpec(mdl(y).numpy()[0][0], epoch)

    if prev_loss - loss < 0.03 and epoch > 25:
        # Loss has not improved by at least 0.03 since the last bump:
        # log the most recent noised input and double the noise volume.
        show.logSpec(y.numpy()[0][0], f"volume_{volume}")
        volume *= 2
        prev_loss = loss

    mlflow.log_metric("volume", volume, step=epoch)
    mlflow.log_metric("loss", loss, step=epoch)
    print(f"loss of {loss}")
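
# -----------------------------------------------------------------------------
# Note: `show` and `model` are local modules not included in this file. The
# sketches below are hypothetical stand-ins illustrating what this script
# assumes from them, not the original implementations.
#
# show.py -- logs a spectrogram image as an MLflow artifact (presumably where
# matplotlib comes in):
#
#   import matplotlib.pyplot as plt
#   import mlflow
#
#   def logSpec(spec, tag):
#       fig, ax = plt.subplots()
#       ax.imshow(spec, origin="lower", aspect="auto")
#       ax.set(xlabel="frame", ylabel="mel bin", title=str(tag))
#       mlflow.log_figure(fig, f"spec_{tag}.png")
#       plt.close(fig)
#
# model.py -- `gen()` must return a callable whose output matches the input
# shape (BATCH_SIZE, 1, 128, 431), e.g. a shape-preserving conv stack:
#
#   from tinygrad import Tensor, nn
#
#   class AutoEncoder:
#       def __init__(self):
#           self.c1 = nn.Conv2d(1, 16, 3, padding=1)
#           self.c2 = nn.Conv2d(16, 1, 3, padding=1)
#
#       def __call__(self, x: Tensor) -> Tensor:
#           return self.c2(self.c1(x).relu()).sigmoid()
#
#   def gen():
#       return AutoEncoder()
# -----------------------------------------------------------------------------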