In [5]:
import numpy as np

# Softmax: convert raw scores to probabilities
num_classes = 3
scores = np.array([-2, 0.3, 1.5])  # unnormalized scores (logits), one per class

probs = np.exp(scores)          # exponentiate each score
probs = probs / np.sum(probs)   # normalize so the probabilities sum to 1
print(probs)

# Cross-entropy loss: negative log probability of the correct class
y = 2                       # index of the correct class
loss = -np.log(probs[y])
print(loss)
[ 0.02268107  0.22622511  0.75109382]
0.286224707512
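The exponentials above can overflow for large scores. A common remedy is to subtract the maximum score before exponentiating, which leaves the probabilities unchanged. The cell below is an illustrative sketch of that trick; the helper name `softmax_cross_entropy` is an assumption for this example, not something defined earlier.
In [6]:
# Numerically stable softmax + cross-entropy (illustrative sketch;
# `softmax_cross_entropy` is a hypothetical helper, not part of the cell above)
def softmax_cross_entropy(scores, y):
    shifted = scores - np.max(scores)                 # subtract max for numerical stability
    probs = np.exp(shifted) / np.sum(np.exp(shifted)) # same probabilities as the unshifted version
    return probs, -np.log(probs[y])

probs, loss = softmax_cross_entropy(scores, y)
print(probs)  # matches the probabilities printed above
print(loss)   # matches the loss printed above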
In [ ]: