Commit b0e532cb authored by umlauf

softmax

parent 0b878a22
@@ -137,23 +137,17 @@ def train(model, name, seed,gradient_accumulation_steps,mixup, threshold, lambda
    return evaluation_test, evaluation_train

# works, but slightly weird output...
# def cross_entropy(logits, target):
#     log_softmax = torch.nn.functional.log_softmax(logits, dim=1)
#     loss = -torch.sum(target * log_softmax, dim=1).mean()
#     return loss
# PyTorch forum version; try with dim=1 for the targets
# def cross_entropy(logits, target):
#     print("Cross Entropy Target: ", target)
#     logprobs = torch.nn.functional.log_softmax(logits, dim=1)
#     return -torch.mean(torch.sum(target * logprobs, dim=1))
# log_softmax version (shadowed by the redefinition below)
def cross_entropy(logits, target):
    print("Cross Entropy Target: ", target)
    logprobs = torch.nn.functional.log_softmax(logits, dim=1)
    return -torch.mean(torch.sum(target * logprobs, dim=1))
# without log_softmax: manual softmax, then log
def cross_entropy(logits, target):
    # note: no minus on log_probs here; the draft below negated both log_probs
    # and the mean, which returned the loss with the wrong sign
    log_probs = torch.log(torch.exp(logits) / torch.sum(torch.exp(logits), dim=1, keepdim=True))
    loss = -torch.mean(torch.sum(target * log_probs, dim=1))
    return loss
# earlier draft (sign error: the minus appears on both log_probs and the mean):
# def cross_entropy(logits, target):
#     log_probs = -torch.log(torch.exp(logits) / torch.sum(torch.exp(logits), dim=1, keepdim=True))
#     loss = -torch.mean(torch.sum(target * log_probs, dim=1))
#     return loss
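# Sanity check (not part of the commit; tensors here are hypothetical): the
# manual formulation above should match the log_softmax formulation for
# moderate logits. For large logits, torch.exp overflows and the manual
# version returns inf/nan, while log_softmax stays finite via log-sum-exp.
import torch

logits = torch.randn(4, 3)                         # batch of 4, 3 classes
target = torch.softmax(torch.randn(4, 3), dim=1)   # soft labels, rows sum to 1

manual = -torch.mean(torch.sum(
    target * torch.log(torch.exp(logits) / torch.sum(torch.exp(logits), dim=1, keepdim=True)),
    dim=1))
stable = -torch.mean(torch.sum(
    target * torch.nn.functional.log_softmax(logits, dim=1), dim=1))

assert torch.allclose(manual, stable, atol=1e-6)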
def mixup_function(batch_of_matrices, batch_of_labels, l, t):
    runs = math.floor(batch_of_matrices.size()[0] / 2)
......
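# The diff truncates mixup_function above. For reference only: generic mixup
# (Zhang et al., 2018) takes a convex combination of paired inputs and labels.
# The sketch below is a hypothetical stand-alone version matching the visible
# signature, NOT the removed code; treating l as the interpolation weight is
# an assumption, and the threshold argument t is omitted.
import math
import torch

def mixup_sketch(batch_of_matrices, batch_of_labels, l):
    runs = math.floor(batch_of_matrices.size()[0] / 2)  # number of pairs
    mixed_x, mixed_y = [], []
    for i in range(runs):
        x1, x2 = batch_of_matrices[2 * i], batch_of_matrices[2 * i + 1]
        y1, y2 = batch_of_labels[2 * i], batch_of_labels[2 * i + 1]
        mixed_x.append(l * x1 + (1 - l) * x2)  # interpolate inputs
        mixed_y.append(l * y1 + (1 - l) * y2)  # interpolate soft labels
    return torch.stack(mixed_x), torch.stack(mixed_y)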