Commit 8b93479b authored by umlauf

CEL

parent 0c11f8a6
@@ -25,8 +25,8 @@ target = torch.tensor([0.0000, 0.6000])
 l = 0.4
 logits = torch.tensor([[ 0.9391, -1.2892], [-0.1607, 0.1771]], dtype=torch.float32)
-for t, l in logits, target:
-    print(l)
+for t, l in zip(target, logits):
+    print(l[0])
 # print(target)
 # value = target.item()
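The fix here matters: `for t, l in logits, target:` iterates over the tuple `(logits, target)` itself, so the first pass unpacks the two logit rows into `t, l` and the second pass the two target scalars; it never produces label/logit pairs. `zip(target, logits)` pairs them row by row. A minimal standalone check with the same tensors as in the hunk:

import torch

target = torch.tensor([0.0000, 0.6000])
logits = torch.tensor([[0.9391, -1.2892], [-0.1607, 0.1771]], dtype=torch.float32)

# zip() pairs each target scalar with its logit row; the old form
# `for t, l in logits, target:` only unpacked the two tensors themselves.
for t, l in zip(target, logits):
    print(t.item(), l[0].item())  # -> 0.0 0.9391..., then 0.6 -0.1607...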
@@ -142,7 +142,7 @@ def train(model, name, seed,gradient_accumulation_steps,mixup, threshold, lambda
 #log base e
 #Function from the meeting
 def cross_entropy(logits, target, l):
-    for t, l in target, logits:
+    for t, l in zip(target, logits):
         #converts the logits to log probabilities
         logprobs = torch.nn.functional.log_softmax(l, dim=1)
         value = t.item() #gets Item (0. or 1.)
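The hunk cuts off below `value = t.item()`, so the rest of `cross_entropy` is not shown in this commit. For orientation only, a sketch of how such a mixup-weighted cross entropy could continue; the `l` / `1 - l` weighting and the final mean are assumptions, not part of the commit. Note also that the commit's loop shadows the mixup weight `l` with the logit row, which the sketch renames to avoid:

import torch

def cross_entropy(logits, target, l):
    # Hypothetical completion: only the loop head, the log_softmax call,
    # and t.item() are visible in the commit; the rest is an assumed
    # reconstruction of a mixup-style soft-target cross entropy.
    losses = []
    for t, row in zip(target, logits):  # `row` avoids shadowing the weight l
        # dim=0 because each row is 1-D here (the commit uses dim=1)
        logprobs = torch.nn.functional.log_softmax(row, dim=0)
        value = t.item()                # hard label, 0. or 1.
        idx = int(value)
        # assumed mixup weighting: l on the labelled class,
        # (1 - l) on the opposite class
        losses.append(-(l * logprobs[idx] + (1.0 - l) * logprobs[1 - idx]))
    return torch.stack(losses).mean()

# Example call with illustrative values:
target = torch.tensor([0.0, 1.0])
logits = torch.tensor([[0.9391, -1.2892], [-0.1607, 0.1771]])
print(cross_entropy(logits, target, l=0.4))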