Kernel Methods

First Problem: 1D Non-Linear Regression

In [1]:
%matplotlib inline

import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.linear_model
import sklearn.kernel_ridge
import sklearn.metrics.pairwise
import sklearn.svm
from matplotlib.colors import ListedColormap

def plot_model(X_test, clf):
    '''
    Note: uses the globals x_train, y_train, and x_test, which are assigned
    below when the dataset is created. Don't overwrite these variables.
    '''
    y_test = clf.predict(X_test)
    plt.scatter(x_train, y_train)
    plt.plot(x_test, y_test)
    plt.legend(['Train', 'Test'])
    
# Set random seed
np.random.seed(0)

# Create random set of m training x values between -5 and 5
m = 100
x_train = np.random.rand(m)*10 - 5   

# Create evenly spaced test x values (for plotting)
x_test  = np.linspace(-5, 5, 100)
m_test  = len(x_test)

# Note: the second definition of f below overrides the first, so the data is
# generated from the trigonometric function, not the polynomial.
def f(x):
    return 0.1*(x + x**2 + 10*x + 0.5*x**2 - 0.5*x**3)*x

def f(x):
    return 0.5*np.cos(x) + np.sin(x) + 4*np.cos(2*x) + np.exp(np.cos(3*x))


y_train = f(x_train) + np.random.randn(m)   # true function plus noise

x_train = x_train.reshape(-1, 1)
x_test  = x_test.reshape(-1, 1)

# Plot the training data
plt.scatter(x_train, y_train)
plt.title('Training Data')
plt.xlabel('x')
plt.ylabel('y')
plt.show()

Standard Linear Regression

In [2]:
clf = sklearn.linear_model.LinearRegression()
clf.fit(x_train, y_train)
plot_model(x_test, clf)

Kernel Trick for Linear Regression

Suppose $\theta$ can be rewritten as a linear combination of the feature vectors, i.e., $\theta = \sum_{i=1}^m \alpha_i x^{(i)}$. Then we have that $$ h_{\theta}(x) = \theta^T x = \sum_{i=1}^m \alpha_i (x^{(i)})^T x = \sum_{i=1}^m \alpha_i K(x^{(i)}, x) $$ where $K(x,z) := x^T z$, the "kernel function", computes the dot product between $x$ and $z$.

We can rewrite this one more time to get $$ h_{\theta}(x) = \sum_{i=1}^m \alpha_i K(x^{(i)}, x) = \alpha^T k(x) $$ where $$ \begin{aligned} \alpha &= \begin{bmatrix}\alpha_1, \ldots, \alpha_m\end{bmatrix}^T \\ k(x) &= \begin{bmatrix}K(x^{(1)}, x), & K(x^{(2)}, x), & \ldots, & K(x^{(m)},x)\end{bmatrix}^T \end{aligned} $$

We can think of this as a new linear regression problem with parameter vector $\alpha$ and a feature mapping that maps $x$ to the vector $k(x)$ of kernel comparisons between $x$ and every training point.

Applying the feature mapping to every row of the original data matrix $X$ yields a "kernel matrix" $K$ defined by $K_{ij} = K(x^{(i)}, x^{(j)})$.

If our assumption holds that the best $\theta$ (the one that achieves minimum cost) can be expressed as a linear combination of the feature vectors, then there is some choice of the parameter vector $\alpha$ that yields this best hypothesis. So, it suffices to look for values of $\alpha$ that minimize the squared error in this new linear regression problem. That is, we can simply solve the new linear regression problem where we replace the data matrix by the kernel matrix!

Note: this is not generally how we solve for $\alpha$ in practice, but it is concrete proof that we can solve the learning and prediction problems while only accessing the data (feature vectors) through dot products.
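
As a quick sanity check, here is a minimal sketch (the names alpha, theta, and k_test are just illustrative; x_train, x_test, and m come from the first cell) verifying that an arbitrary $\alpha$ gives the same predictions whether we use $\theta = \sum_i \alpha_i x^{(i)}$ directly or the kernel form $\alpha^T k(x)$:

# Sanity check: theta^T x and alpha^T k(x) give identical predictions
alpha  = np.random.randn(m)          # an arbitrary coefficient vector
theta  = x_train.T @ alpha           # theta = sum_i alpha_i * x^(i)
k_test = x_test @ x_train.T          # row i = kernel comparisons of test point i to all training points
print(np.allclose(x_test @ theta, k_test @ alpha))   # prints True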

In [3]:
print(x_train, y_train)
[[ 0.48813504]
 [ 2.15189366]
 [ 1.02763376]
 ...
 [ 3.28940029]
 [-4.95304524]] [ 3.09793347  2.55114954  0.08571536 ...  3.13437649 -1.92131611]
In [3]:
def linear_kernel(X, Z):
    '''
    Compute dot product between each row of X and each row of Z
    '''
    m1,_ = X.shape
    m2,_ = Z.shape
    K = np.zeros((m1, m2))
    for i in range(m1):
        for j in range(m2):
            K[i,j] = np.dot(X[i,:], Z[j,:])

    return K

K_train = linear_kernel(x_train, x_train) + 1e-10 * np.eye(m)  # see note below
K_test  = linear_kernel(x_test,  x_train)

clf = sklearn.linear_model.LinearRegression()
clf.fit(K_train, y_train)
plot_model(K_test, clf)

[Optional; advanced] Why do we add 1e-10 * np.eye(m) to K_train?

It turns out that our assumption that $\theta = \sum_{i=1}^m \alpha_i x^{(i)}$ needs to be refined slightly.

Let's consider the parameter vector $\theta^*$ that minimizes the cost function $J(\theta) = \frac{1}{2}\sum_{i=1}^m \big(\theta^T x^{(i)} - y^{(i)}\big)^2$. It is true that there is some $\alpha$ such that $\theta^* = \sum_{i=1}^m \alpha_i x^{(i)}$. So, we can minimize the cost function by searching over $\alpha$ instead. But there might be many different vectors $\alpha$ that achieve the same cost, so minimizing $J(\alpha) = \frac{1}{2}\sum_{i=1}^m \big(\alpha^T k(x^{(i)}) - y^{(i)}\big)^2$ is not guaranteed to find the vector $\alpha$ that corresponds to $\theta^*$.

However, if we instead add even a tiny bit of regularization to our training objective, we are guaranteed to exactly recover $\theta^*$. This is a result known as the representer theorem.

Try changing this to

K_train = linear_kernel(x_train, x_train)

You will see that the learned model is slightly wiggly, i.e., the learned $\theta$ does not exactly correspond to a hypothesis in the original hypothesis class. But it should have the same cost as $\theta^*$ to within a very small tolerance.
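
To check that claim, here is a minimal sketch (it assumes x_train, y_train, and linear_kernel from the cells above; ols, dual, mse_ols, and mse_dual are just illustrative names) comparing the training error of ordinary linear regression with that of linear regression on the plain, un-jittered kernel matrix:

# Compare training costs: ordinary least squares vs. regression on the plain kernel matrix
ols     = sklearn.linear_model.LinearRegression().fit(x_train, y_train)
K_plain = linear_kernel(x_train, x_train)           # no 1e-10 * np.eye(m) jitter
dual    = sklearn.linear_model.LinearRegression().fit(K_plain, y_train)
mse_ols  = np.mean((ols.predict(x_train) - y_train)**2)
mse_dual = np.mean((dual.predict(K_plain) - y_train)**2)
print(mse_ols, mse_dual)   # the two training costs should agree to within a tiny tolerance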

Polynomial Kernel

In [7]:
# Explicit definition
def polynomial_kernel(X, Z, d):
    '''
    Compute the degree-d polynomial kernel (x^T z + 1)^d between each row of X
    and each row of Z
    '''
    m1,_ = X.shape
    m2,_ = Z.shape
    K = np.zeros((m1, m2))
    for i in range(m1):
        for j in range(m2):
            K[i,j] = (np.dot(X[i,:], Z[j,:]) + 1)**d
            
    return K


# Better version: use sklearn.metrics.pairwise.polynomial_kernel

d = 12
K_train = sklearn.metrics.pairwise.polynomial_kernel(x_train, x_train, degree=d)
K_test  = sklearn.metrics.pairwise.polynomial_kernel(x_test,  x_train, degree=d)

K_train = K_train + 1e-10*np.eye(m)

clf = sklearn.linear_model.LinearRegression()
clf.fit(K_train, y_train)
plot_model(K_test, clf)

Gaussian Kernel

In [6]:
# Here is an explicit definition
def gaussian_kernel(X, Z, gamma):
    '''
    Compute the Gaussian (RBF) kernel exp(-gamma * ||x - z||^2) between each
    row of X and each row of Z
    '''
    m1 = X.shape[0]
    m2 = Z.shape[0]
    
    K = np.zeros((m1, m2))
    
    for i in range(m1):
        for j in range(m2):
            K[i,j] = np.exp( -gamma * np.linalg.norm(X[i,:] - Z[j,:])**2 )
            
    return K

# Better version: use sklearn.metrics.pairwise.rbf_kernel
#   rbf = "radial basis function"

Gaussian Kernel Intuition

$$K(\mathbf{x}, \mathbf{z}) = \exp (- \gamma ||\mathbf{x} - \mathbf{z}||^2 )$$

Interpret as a similarity function:

  • Equal to 1 when $\mathbf{x} = \mathbf{z}$
  • Decays exponentially as $\mathbf{z}$ and $\mathbf{x}$ get farther apart
  • Smaller $\gamma$ -> slower decay
  • Larger $\gamma$ -> faster decay
In [7]:
gamma_vals = [0.01, 0.1, 1, 10]

x    = np.linspace(-10, 10, 200).reshape(-1,1)
zero = np.array([[0]])

n_plots = len(gamma_vals)

plt.figure(figsize=(12, 4))

for i in range(n_plots):
    plt.subplot(2, 2, i+1)
    k = sklearn.metrics.pairwise.rbf_kernel(x, zero, gamma=gamma_vals[i])
    plt.plot(x, k)
    plt.title('gamma = %.2f' % (gamma_vals[i]))

plt.show()

Linear Regression with Gaussian Kernel

In [10]:
#gamma = 1000
gamma = 1

# Matrix of similarities between training points
K_train = sklearn.metrics.pairwise.rbf_kernel(x_train, x_train, gamma=gamma) + 1e-2*np.eye(m)

# ith row = similarity of ith test point to all training points
K_test  = sklearn.metrics.pairwise.rbf_kernel(x_test,  x_train, gamma=gamma) 

clf = sklearn.linear_model.LinearRegression()
clf.fit(K_train, y_train)
plot_model(K_test, clf)

Linear Regression with Gaussian Kernel and Regularization

  • Tip: use regularization with kernel methods.
  • Tip: don't form the kernel matrix yourself. Use an optimization routine that allows you to specify the kernel.

In [21]:
from sklearn import kernel_ridge

lambda_val = 0.01
gamma = 1

clf = kernel_ridge.KernelRidge(alpha=lambda_val, kernel='rbf', gamma=gamma)
clf.fit(x_train, y_train)
plot_model(x_test, clf)

Second Problem: 2D Non-Linear Classification

In [11]:
N = 100  # number of points per class
D = 2   # dimensionality
K = 2   # number of classes

X = np.zeros((N*K,D)) # data matrix (each row = single example)
y = np.zeros(N*K) # class labels

for j in range(K):
  ix = range(N*j,N*(j+1))
  r = np.linspace(0.0,1,N) # radius
  t = np.linspace(j*4,(j+1)*4,N) + np.random.randn(N)*0.4 # theta
  X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
  y[ix] = j

# let's visualize the data:
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)

Plotting Setup

In [12]:
h = 0.05
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

X_test = np.c_[xx.ravel(), yy.ravel()]
labels=['or','ob']

def plot_model(X_test, clf):
    Z = clf.predict(X_test)

    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])

    
    # Plot also the training points
    #plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    for i in [0, 1]:
        plt.plot(X[y==i, 0], X[y==i, 1], labels[i])
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    
    
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    #plt.contourf(xx, yy, Z, alpha=1, cmap=cmap_light)
    plt.pcolormesh(xx, yy, Z,cmap=cmap_light)

Kernelized Logistic Regression

In [18]:
#gamma = 0.01
#gamma = 0.1
#gamma = 1
#gamma = 10
gamma = 1000

K      = sklearn.metrics.pairwise.rbf_kernel(X, X, gamma=gamma)
K_test = sklearn.metrics.pairwise.rbf_kernel(X_test, X, gamma=gamma)

clf = sklearn.linear_model.LogisticRegression(solver='lbfgs')
clf.fit(K, y)

plot_model(K_test, clf)

SVM Loss Function

In [19]:
def plot_loss():
    xmin, xmax = -4, 4
    xx = np.linspace(xmin, xmax, 100)

    plt.figure()
    plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',linewidth=3)
    plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'b-',linewidth=3)
    plt.xlabel(r"$\theta^T x$")
    plt.ylabel("Loss")
    plt.legend(["Logistic Loss", "SVM loss (hinge loss)"])
    plt.title(r"cost($\theta^T x, y)$ for $y = 1$")
    plt.show()
    
plot_loss()

SVM Classification

In [20]:
#gamma = 0.01
#gamma = 0.1
#gamma = 1
gamma = 10

C = 10 # C = 2/lambda

clf = sklearn.svm.SVC(C=C, kernel='rbf', gamma=gamma)
clf.fit(X, y)
plot_model(X_test, clf)

SVM vs Kernelized Logistic Regression

In [21]:
C = 1000
gamma = 100

# Fit kernelized logistic regression
K      = sklearn.metrics.pairwise.rbf_kernel(X, X, gamma=gamma)
K_test = sklearn.metrics.pairwise.rbf_kernel(X_test, X, gamma=gamma)
logistic = sklearn.linear_model.LogisticRegression(C=C, solver='lbfgs')
logistic.fit(K, y)

# Fit SVM with same parameter
svm = sklearn.svm.SVC(C=C, kernel='rbf', gamma=gamma)
svm.fit(X, y)

plt.figure(figsize=(10,4))
plt.subplot(121)
plot_model(K_test, logistic)

plt.subplot(122)
plot_model(X_test, svm)