```{r setup, include=FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

```{r}
library(rTorch)
reticulate::use_condaenv("r-torch", required = TRUE)
invisible(reticulate::py_config())
```
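Before evaluating the Python chunks it can help to confirm that reticulate is bound to an environment that actually provides PyTorch. A minimal check, assuming the `r-torch` conda environment configured above:

```{python, eval=FALSE}
import torch
print(torch.__version__)  # any reasonably recent release works for this example
```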
```{python, eval=FALSE}
import numpy as np
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
np.random.seed(2048)
N = 100
D = 2
X = np.random.randn(N, D) * 2
ctr = int(N / 2)

# center the first N/2 points at (-2, -2)
X[:ctr, :] = X[:ctr, :] - 2 * np.ones((ctr, D))
# center the last N/2 points at (2, 2)
X[ctr:, :] = X[ctr:, :] + 2 * np.ones((ctr, D))

# labels: first N/2 are 0, last N/2 are 1
T = np.array([0] * ctr + [1] * ctr).reshape(N, 1)
# plot the data, coloring the dots by label
plt.scatter(X[:, 0], X[:, 1], c=T.reshape(N), s=100, alpha=0.5)
plt.xlabel('X(1)')
plt.ylabel('X(2)')
plt.savefig('before.png')
class Model(torch.nn.Module):

    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(2, 1)  # 2 inputs, 1 output

    def forward(self, x):
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred
model = Model()
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
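# Note (aside, not in the original post): since forward() already applies
# torch.sigmoid, BCELoss is the matching loss. A common, more numerically
# stable alternative is torch.nn.BCEWithLogitsLoss, which fuses the sigmoid
# into the loss; it would take the raw self.linear(x) output instead.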
x_data = Variable(torch.Tensor(X))
y_data = Variable(torch.Tensor(T))
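# Note (aside): torch.autograd.Variable has been deprecated since PyTorch 0.4
# and is now a no-op wrapper around tensors, so plain tensors work here too:
#   x_data = torch.tensor(X, dtype=torch.float32)
#   y_data = torch.tensor(T, dtype=torch.float32)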
print("Loss at each iteration") for epoch in range(1000): # Forward pass: Compute predicted y by passing x to the model y_pred = model(x_data)
# Compute and print loss loss = criterion(y_pred, y_data) print(epoch, loss.item()) # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() loss.backward() optimizer.step()
w = list(model.parameters())
w0 = w[0].data.numpy()  # convert torch tensors to numpy
w1 = w[1].data.numpy()
print("Final gradient descend:", w)
# plot the data and the separating line found by gradient descent
plt.scatter(X[:, 0], X[:, 1], c=T.reshape(N), s=100, alpha=0.5)
x_axis = np.linspace(-6, 6, 100)
y_axis = -(w1[0] + x_axis * w0[0][0]) / w0[0][1]
line_up, = plt.plot(x_axis, y_axis, 'r--', label='gradient descent')
plt.legend(handles=[line_up])
plt.xlabel('X(1)')
plt.ylabel('X(2)')
plt.savefig('after.png')
```
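The separating line plotted above is the model's decision boundary. With weights $w_1, w_2$ and bias $b$, the model predicts $\hat{y} = \sigma(w_1 x_1 + w_2 x_2 + b)$, and $\sigma(z) = 0.5$ exactly when $z = 0$, so the boundary is the line

$$
x_2 = -\frac{b + w_1 x_1}{w_2},
$$

which is what `y_axis = -(w1[0] + x_axis * w0[0][0]) / w0[0][1]` computes from the fitted parameters.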
```{python, engine="python3", results="hold", collapse=FALSE, echo=FALSE} # Logistic Regression # https://m-alcu.github.io/blog/2018/02/10/logit-pytorch/ import numpy as np import torch import torch.nn.functional as F from torch.autograd import Variable import matplotlib.pyplot as plt np.random.seed(2048) N = 100 D = 2 X = np.random.randn(N, D) * 2 ctr = int(N/2) # center the first N/2 points at (-2,-2) X[:ctr,:] = X[:ctr,:] - 2 * np.ones((ctr, D)) # center the last N/2 points at (2, 2) X[ctr:,:] = X[ctr:,:] + 2 * np.ones((ctr, D)) # labels: first N/2 are 0, last N/2 are 1 # mark the first half with 0 and the second half with 1 T = np.array([0] * ctr + [1] * ctr).reshape(100, 1) # plot the data. color the dots using T plt.scatter(X[:,0], X[:,1], c=T.reshape(N), s=100, alpha=0.5) plt.xlabel('X(1)') plt.ylabel('X(2)') plt.savefig('before.png') class Model(torch.nn.Module): def __init__(self): super(Model, self).__init__() self.linear = torch.nn.Linear(2, 1) # 2 in and 1 out def forward(self, x): y_pred = torch.sigmoid(self.linear(x)) return y_pred # Our model model = Model() criterion = torch.nn.BCELoss(reduction='mean') optimizer = torch.optim.SGD(model.parameters(), lr=0.01) x_data = Variable(torch.Tensor(X)) y_data = Variable(torch.Tensor(T)) # Training loop print("Loss at each iteration") for epoch in range(1000): # Forward pass: Compute predicted y by passing x to the model y_pred = model(x_data) # Compute and print loss loss = criterion(y_pred, y_data) print(epoch, loss.item()) # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() loss.backward() optimizer.step() w = list(model.parameters()) w0 = w[0].data.numpy() w1 = w[1].data.numpy() print("Final gradient descend:", w) # plot the data and separating line plt.scatter(X[:,0], X[:,1], c=T.reshape(N), s=100, alpha=0.5) x_axis = np.linspace(-6, 6, 100) y_axis = -(w1[0] + x_axis * w0[0][0]) / w0[0][1] line_up, = plt.plot(x_axis, y_axis,'r--', label='gradient descent') plt.legend(handles=[line_up]) plt.xlabel('X(1)') plt.ylabel('X(2)') plt.savefig('after.png')