1 | # Import necessary libraries |
2 | import torch |
3 | import torch.nn as nn |
4 | from typing import List |
5 |
|
6 |
|
7 | # Define a simple RNN model for time series prediction |
class RNNModel(nn.Module):
    """Simple RNN for one-step time series prediction.

    Feeds the input sequence through a (possibly stacked) vanilla RNN and
    maps the hidden state of the LAST time step through a linear head.

    Parameters:
    - input_size: int, number of features per time step.
    - hidden_size: int, RNN hidden state width.
    - output_size: int, number of output features.
    - num_layers: int, number of stacked RNN layers.
    """

    def __init__(
        self, input_size: int, hidden_size: int, output_size: int, num_layers: int
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> expected input shape is (batch, seq_len, input_size)
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Return (batch, output_size) predictions for input x of shape
        (batch, seq_len, input_size)."""
        # The initial hidden state must match the input's device and dtype;
        # a bare torch.zeros(...) defaults to CPU/float32 and raises a
        # device-mismatch error as soon as x lives on CUDA.
        h0 = torch.zeros(
            self.num_layers,
            x.size(0),
            self.hidden_size,
            device=x.device,
            dtype=x.dtype,
        )
        # Forward propagate the RNN; we discard the final hidden state.
        out, _ = self.rnn(x, h0)
        # Classify using only the last time step's output.
        return self.fc(out[:, -1, :])
26 |
|
27 |
|
def main(
    data: List[float], num_epochs: int = 100, learning_rate: float = 0.01
) -> List[float]:
    """
    Train a small RNN on a univariate time series and predict the next value.

    Parameters:
    - data: List[float], the time series data for training (at least 2 points).
    - num_epochs: int, the number of epochs to train the model.
    - learning_rate: float, the learning rate for the optimizer.

    Returns:
    - predictions: List[float], the predicted next value(s) for the series.

    Raises:
    - ValueError: if fewer than two data points are supplied (no
      (input, target) pairs can be formed).
    """
    if len(data) < 2:
        # Without this guard the training loop body never runs, leaving
        # `loss` unbound and making the progress print below raise NameError.
        raise ValueError("need at least two data points to train the model")

    # Convert data to a flat float tensor.
    data_normalized = torch.FloatTensor(data).view(-1)

    # Model hyperparameters: univariate input and output.
    input_size = 1
    hidden_size = 64
    output_size = 1
    num_layers = 1
    model = RNNModel(input_size, hidden_size, output_size, num_layers)

    # Loss and optimizer.
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Train: one-step-ahead prediction — each point is used to predict the
    # next one, with sequence length 1 and batch size 1.
    model.train()
    for epoch in range(num_epochs):
        for i in range(len(data_normalized) - 1):
            # Shape (batch=1, seq_len=1, input_size=1) as required by
            # batch_first RNNs.
            sequence = data_normalized[i : i + 1].view(-1, 1, 1)
            target = data_normalized[i + 1].view(-1)
            # Forward pass.
            output = model(sequence)
            loss = criterion(output.view(-1), target)
            # Backward and optimize.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if (epoch + 1) % 10 == 0:
            print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")

    # Predict the value following the last observation. eval() makes the
    # behavior correct if dropout/batch-norm layers are ever added.
    model.eval()
    test_data = data_normalized[-1:].view(-1, 1, 1)
    with torch.no_grad():
        predictions = model(test_data).view(-1).tolist()

    return predictions
77 |
|