import torch
import torch.nn as nn
import torch.optim as optim
# Ensure reproducibility
torch.manual_seed(0)
# 1. Generate a toy dataset
# Inputs: 100 samples drawn uniformly from [0, 1), shape (100, 1)
inputs = torch.rand(100, 1)
# Targets follow the linear relation y = 2.5x + 3, plus Gaussian noise
# with standard deviation 0.1
targets = 2.5 * inputs + 3 + torch.randn(100, 1) * 0.1
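# (Optional sanity check, not required for training) Both tensors should
# have shape (100, 1) so the loss can compare them elementwise
print(inputs.shape, targets.shape)  # torch.Size([100, 1]) torch.Size([100, 1])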
# 2. Define the neural network
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(1, 10)  # First fully-connected layer: 1 input feature -> 10 hidden units
        self.fc2 = nn.Linear(10, 1)  # Second fully-connected layer: 10 hidden units -> 1 output
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)  # Non-linear activation between the two layers
        return self.fc2(x)
# Create an instance of the network
model = SimpleNN()
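# (Optional) Printing an nn.Module lists its registered layers, a quick
# way to confirm the architecture matches the definition above
print(model)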
# 3. Define the loss and optimizer
criterion = nn.MSELoss() # Mean squared error
optimizer = optim.SGD(model.parameters(), lr=0.01)
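# (Illustration only) MSELoss averages the squared error over all elements;
# here the squared errors are [1.0, 0.0], so the mean is 0.5
print(criterion(torch.tensor([1.0, 2.0]), torch.tensor([0.0, 2.0])))  # tensor(0.5000)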
# 4. Train the network
num_epochs = 500
for epoch in range(num_epochs):
    # Forward pass
    outputs = model(inputs)
    loss = criterion(outputs, targets)

    # Zero gradients, backward pass, optimizer step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Print the loss every 100 epochs
    if (epoch + 1) % 100 == 0:
        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")
Sample output from the training loop:

Epoch [100/500], Loss: 0.0366
Epoch [200/500], Loss: 0.0167
Epoch [300/500], Loss: 0.0105
Epoch [400/500], Loss: 0.0086
Epoch [500/500], Loss: 0.0080
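# 5. Evaluate the trained network (a minimal sketch; the test points are
# arbitrary, chosen to compare predictions against the true relation
# y = 2.5x + 3 that generated the data)
model.eval()
with torch.no_grad():
    test_inputs = torch.tensor([[0.0], [0.5], [1.0]])
    predictions = model(test_inputs)
    expected = 2.5 * test_inputs + 3
    for x, y_hat, y in zip(test_inputs, predictions, expected):
        print(f"x = {x.item():.1f}, predicted = {y_hat.item():.3f}, true = {y.item():.3f}")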