A quick workflow demo in PyTorch implementing linear regression

Model

Use nn.Parameter

class LinearRegressionModel(nn.Module):
    """Linear regression (y = weights * x + bias) built from raw nn.Parameters."""

    def __init__(self):
        super().__init__()
        # nn.Parameter already sets requires_grad=True on the wrapped tensor,
        # so passing requires_grad=True to torch.randn is redundant — dropped.
        self.weights = nn.Parameter(torch.randn(1, dtype=torch.float))
        self.bias = nn.Parameter(torch.randn(1, dtype=torch.float))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute the model output: weights * x + bias (elementwise)."""
        return self.weights * x + self.bias


model = LinearRegressionModel()

Use nn.Linear

class LinearRegression(nn.Module):
    """Linear regression using torch's built-in nn.Linear layer."""

    def __init__(self):
        super().__init__()
        # One input feature mapped to one output feature: y = w * x + b.
        self.linear_layer = nn.Linear(in_features=1, out_features=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the linear layer to x (expects shape (..., 1))."""
        return self.linear_layer(x)


# Bug fix: the original instantiated LinearRegressionModel (the other demo
# class) here instead of the nn.Linear-based class defined just above.
model = LinearRegression()

Loss function and optimizer

# L1 (mean absolute error) loss — a reasonable choice for this regression demo.
loss_fn = torch.nn.L1Loss()

# Bug fix: the original passed model_0.parameters(), but no model_0 is defined
# in this file — the model created above is named `model`.
optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01)

Training loop

epochs = 300

for epoch in range(epochs):
    # --- Training step ---
    model.train()  # enable training behaviour (parameters track gradients)

    y_pred = model(X_train)          # forward pass
    loss = loss_fn(y_pred, y_train)  # training loss

    optimizer.zero_grad()  # reset gradients so they don't accumulate per step
    loss.backward()        # backpropagation
    optimizer.step()       # update parameters from the computed gradients

    # --- Evaluation step ---
    model.eval()  # turn off settings not needed for evaluation
    with torch.inference_mode():  # no gradient tracking during inference
        test_pred = model(X_test)
        test_loss = loss_fn(test_pred, y_test)
badge