# (extraction artifact: a run of stray line numbers was garbled into this line; commented out)
| import torch
# Training set for the target function y = 2x: three (input, target) pairs,
# each stored as a (3, 1) column tensor so it matches nn.Linear(1, 1).
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])
""" Our model class should inherit from torch.nn.Module, which is the base class for all of the neural networks. __init__() and forward() have to be implemented. class nn.Linear contains two Tensor components: weight and bias class nn.Linear has implemented the magic method __call__(), which enable the instance of the class to be called like a function. therefore, model(data) can call the forward function """
class LinearModel(torch.nn.Module):
    """Single-feature linear regression model: y = w * x + b.

    Inherits from torch.nn.Module (the base class for all neural
    networks), so calling an instance like ``model(x)`` dispatches to
    ``forward(x)`` via the inherited ``__call__``.
    """

    def __init__(self):
        super().__init__()
        # One input feature mapped to one output feature; nn.Linear owns
        # the learnable weight and bias tensors.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Return the model's prediction for the input tensor ``x``."""
        return self.linear(x)
# Instantiate the model, a sum-of-squared-errors loss, and an SGD optimizer.
model = LinearModel()
# FIX: size_average=False is a long-deprecated MSELoss argument; the modern
# equivalent is reduction='sum' (identical behavior: loss is summed, not averaged).
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Standard training loop: forward pass, compute loss, backprop, update.
for epoch in range(1000):
    y_pred = model(x_data)            # forward pass on the whole batch
    loss = criterion(y_pred, y_data)  # scalar loss tensor
    print(epoch, loss.item())

    optimizer.zero_grad()             # clear gradients from the previous step
    loss.backward()                   # accumulate d(loss)/d(param)
    optimizer.step()                  # apply the SGD update to weight and bias
# Report the learned parameters; for this data we expect w ≈ 2 and b ≈ 0.
print('w=', model.linear.weight.item())
# BUG FIX: the original printed model.linear.weight.item() again under the
# 'b=' label; the bias must be read from model.linear.bias.
print('b=', model.linear.bias.item())
# Inference: predict y for an unseen input x = 4.0 (expect roughly 8.0
# once the model has converged toward w = 2, b = 0).
x_test = torch.tensor([4.0])
y_test = model(x_test)
print('y_pred= ', y_test.data)
|