# Train a one-layer sigmoid network to map one-hot digits (0-9) to their 4-bit binary codes.
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# Truth table: columns 0-9 one-hot encode a digit d (row index);
# columns 10-13 hold d's 4-bit binary code, most significant bit first.
data = np.array(
    [
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
        [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1],
        [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
    ],
    dtype="float32",
)

# Inputs are the one-hot columns; targets are the binary-code columns.
x = data[:, :10]
y = data[:, 10:14]
class net(nn.Module):
    """Single-layer perceptron: 10-dim one-hot input -> 4 sigmoid outputs.

    The sigmoid keeps every output in (0, 1), which is what nn.BCELoss
    requires of its predictions.
    """

    def __init__(self):
        super().__init__()  # zero-arg super (was legacy super(net, self))
        self.Conn_layers = nn.Sequential(
            nn.Linear(10, 4),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return probabilities of shape (N, 4) for input of shape (N, 10) or (10,)."""
        output1 = self.Conn_layers(x)
        # reshape(-1, 4) is a no-op for batched input, but it promotes a
        # 1-D input's (4,) result to (1, 4), keeping the return rank stable.
        output = output1.reshape(-1, 4)
        return output
# Instantiate the model; the variable deliberately shadows the class name.
net = net()

# Convert the numpy splits into float32 tensors: (10, 10) inputs, (10, 4) targets.
x = torch.Tensor(x.reshape(-1, 10))
y = torch.Tensor(y.reshape(-1, 4))

# Binary cross-entropy against the 4-bit targets, plain SGD with momentum.
loss_function = nn.BCELoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

# Standard loop: clear gradients, forward, compute loss, backprop, update.
for epoch in range(10000):
    optimizer.zero_grad()
    prediction = net(x)
    loss = loss_function(prediction, y)
    loss.backward()
    optimizer.step()
# Evaluation pass: disable autograd so no graph is built for this
# forward call (the original ran it with gradients enabled for no reason).
with torch.no_grad():
    test = net(x)
print("input is {}".format(x.detach().numpy()))
print('out is {}'.format(test.detach().numpy()))
# Dump the learned parameters of every linear layer.
for layer in net.modules():
    if isinstance(layer, nn.Linear):
        # detach() is still required here: parameters carry requires_grad=True.
        print("weight is {}".format(layer.weight.detach().numpy()))
        print("bias is {}".format(layer.bias.detach().numpy()))