# Build a small MLP classifier: 8 input features -> 4 -> 6 hidden units -> 2 classes.
net = Net(in_dim=8, n_hidden1=4, n_hidden2=6, out_dim=2)
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
# CrossEntropyLoss expects raw logits and integer class indices as targets.
loss_fun = torch.nn.CrossEntropyLoss()
losses = []  # per-epoch training loss, kept for later plotting/inspection
epoches = 1000
for i in range(epoches):
    # Call the module directly (net(x)) instead of net.forward(x) so that
    # registered forward/backward hooks are honored.
    out = net(datalL_train)
    new_out = out.squeeze()
    # Targets must be long-typed class indices for CrossEntropyLoss.
    loss = loss_fun(new_out, lableL_train.long())
    losses.append(loss.item())
    optimizer.zero_grad()  # clear gradients accumulated from the previous step
    loss.backward()        # backpropagate
    optimizer.step()       # apply the SGD update
# Inference: disable autograd so no computation graph is built for the
# test-set forward pass (saves memory; predictions carry no grad_fn).
with torch.no_grad():
    predict = net(datalL_test)
print(predict)
print(lableL_test)
'''
Sample output:
tensor([[-0.1968, 0.9195],
        [-0.3286, 0.9704],
        [-0.4561, 1.1754],
        [-0.8294, 1.5649]], grad_fn=
)
tensor([0., 1., 1., 1.])
'''