Skip to content
Snippets Groups Projects
Commit 7e811a15 authored by umlauf's avatar umlauf
Browse files

new

parent 2d412ade
No related branches found
No related tags found
No related merge requests found
......@@ -110,7 +110,7 @@ def train(model, name, seed,gradient_accumulation_steps,mixup, threshold, lambda
#add mira 1 line
#new_labels_batch = new_labels_batch.to(torch.float64)
loss_2=loss_fct(logits.view(-1, 2).to("cuda"), new_labels_batch.view(-1).to("cuda").to(torch.float))
loss_2=loss_fct(logits.view(-1, 2).to("cuda"), new_labels_batch.view(-1).to("cuda").to(torch.float32))
print("MixUp Loss: ", loss_2)
#update entire model
loss_2.backward()
......
0% Loading.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment