Differences

This shows the differences between two versions of this page.

pytorch:regression [2022/06/02 13:48] – created watalu
pytorch:regression [2022/06/02 13:56] (current) watalu
Line 61: Line 61:
 <code>
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-# Assume that we are on a CUDA machine, then this should print a CUDA device:
 print("Executing the model on :",device)
 class Net(torch.nn.Module):
     def __init__(self, n_feature, size_hidden, n_output):
         super(Net, self).__init__()
-        self.hidden = torch.nn.Linear(cols, size_hidden)   # hidden layer
-        self.predict = torch.nn.Linear(size_hidden, n_output)   # output layer
+        self.hidden = torch.nn.Linear(cols, size_hidden)
+        self.predict = torch.nn.Linear(size_hidden, n_output)

     def forward(self, x):
-        x = F.relu(self.hidden(x))      # activation function for hidden layer
-        x = self.predict(x)             # linear output
+        x = F.relu(self.hidden(x))
+        x = self.predict(x)
         return x

 model = Net(cols, size_hidden, n_output)
-optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
-criterion = torch.nn.MSELoss(reduction='sum' # this is for regression mean squared loss
+optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
+criterion = torch.nn.MSELoss(reduction='sum')
 </code>
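For context, here is a minimal self-contained sketch of the corrected block from the new revision (which fixes the `net.parameters()` / missing-parenthesis bugs). The values of cols, size_hidden, n_output, and learning_rate are defined elsewhere on the page, so the ones below are placeholder assumptions; the sketch also adds the import that F.relu needs and uses the n_feature argument inside __init__ rather than the global cols.

<code>
import torch
import torch.nn.functional as F

# Placeholder hyperparameters -- the page defines these earlier (assumed values).
cols, size_hidden, n_output, learning_rate = 13, 100, 1, 0.01

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Executing the model on :", device)

class Net(torch.nn.Module):
    def __init__(self, n_feature, size_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, size_hidden)    # hidden layer
        self.predict = torch.nn.Linear(size_hidden, n_output)    # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))   # ReLU activation on the hidden layer
        x = self.predict(x)          # linear output for regression
        return x

model = Net(cols, size_hidden, n_output)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
criterion = torch.nn.MSELoss(reduction='sum')   # summed squared error loss
</code>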
  
Line 119: Line 119:
 ax.plot(loss_train_history, color="blue")
 ax.plot(loss_test_history, color="red")
+plt.show()
+fig, ax = plt.subplots()
+ax.plot(loss_train_history, color="blue")
+ax.plot(loss_test_history, color="red")
+plt.ylim(0, 8000)
+plt.show()
+plt.plot(loss_test_records)
 plt.show()
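The lines added here re-draw the loss curves with the y-axis clipped to 0–8000 so the later epochs are visible, then plot loss_test_records on its own. A standalone sketch with dummy histories; the real lists are filled in the page's training loop, and loss_test_records is assumed to be another list recorded there:

<code>
import matplotlib.pyplot as plt

# Dummy loss histories standing in for the lists built during training (assumed shapes).
loss_train_history = [20000.0 / (epoch + 1) for epoch in range(200)]
loss_test_history = [22000.0 / (epoch + 1) for epoch in range(200)]
loss_test_records = loss_test_history   # assumption: per-epoch test-loss records

# Full-range view: the early epochs dominate the scale.
fig, ax = plt.subplots()
ax.plot(loss_train_history, color="blue")
ax.plot(loss_test_history, color="red")
plt.show()

# Clipped view: the same curves, zoomed to the 0-8000 range.
fig, ax = plt.subplots()
ax.plot(loss_train_history, color="blue")
ax.plot(loss_test_history, color="red")
plt.ylim(0, 8000)
plt.show()

# Test loss on its own.
plt.plot(loss_test_records)
plt.show()
</code>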
  
Line 127: Line 134:
 pred=result.data[:,0].numpy()
 print(len(pred),len(y_train))
-r2_score(pred,y_train)
+print(r2_score(pred,y_train))

 X = Variable(torch.FloatTensor(X_test))
Line 133: Line 140:
 pred=result.data[:,0].numpy()
 print(len(pred),len(y_test))
-r2_score(pred,y_test)
+print(r2_score(pred,y_test))
 </code>
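One caveat on the R² lines: scikit-learn defines the signature as r2_score(y_true, y_pred), so passing the prediction first, as both revisions do, gives a different value whenever the fit is imperfect. A runnable sketch of the test-set evaluation with the conventional argument order; the dummy model and data below are stand-ins for the ones built earlier on the page:

<code>
import numpy as np
import torch
from torch.autograd import Variable
from sklearn.metrics import r2_score

# Dummy stand-ins (assumptions) for the page's trained model and test split.
X_test = np.random.rand(50, 13).astype("float32")
y_test = X_test.sum(axis=1)
model = torch.nn.Linear(13, 1)

X = Variable(torch.FloatTensor(X_test))
result = model(X)
pred = result.data[:, 0].numpy()
print(len(pred), len(y_test))

# scikit-learn convention: r2_score(y_true, y_pred).
print(r2_score(y_test, pred))
</code>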