require "nn"
require 'optim'
mlp = nn.Sequential(); -- make a multi-layer perceptron
inputs = 2; outputs = 1; HUs = 20; -- parameters
mlp:add(nn.Linear(inputs, HUs))
mlp:add(nn.Sigmoid())
mlp:add(nn.Linear(HUs, outputs))
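-- the final Sigmoid squashes the output into (0, 1), as required by BCECriterion below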
mlp:add(nn.Sigmoid())
criterion = nn.BCECriterion()
print(mlp)
batchSize = 128
batchInputs = torch.DoubleTensor(batchSize, inputs) -- or CudaTensor for GPU training
batchLabels = torch.DoubleTensor(batchSize) -- or CudaTensor for GPU training
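-- build one batch of XOR examples: each row of batchInputs is a random 0/1 pair,
-- and the matching entry of batchLabels is the XOR of that pair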
for i = 1, batchSize do
   local input = torch.randn(2) -- normally distributed example in 2d
   -- binarize each coordinate to 0 or 1
   for j = 1, 2 do
      input[j] = (input[j] > 0 and 1 or 0)
   end
   local label = bit.bxor(input[1], input[2]) -- XOR of the two binary coordinates
   batchInputs[i]:copy(input)
   batchLabels[i] = label
end
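-- getParameters() returns a single flattened view of all the network's weights ('params')
-- and of their gradients ('gradParams'); the optimizer updates 'params' in place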
params, gradParams = mlp:getParameters()

local optimState = {learningRate = 0.01}

for epoch = 1, 500 do
   -- feval takes the current parameters and returns the loss on the batch
   -- together with the gradient of the loss with respect to those parameters
   local function feval(params)
      gradParams:zero()
      local outputs = mlp:forward(batchInputs)
      local loss = criterion:forward(outputs, batchLabels)
      local dloss_doutputs = criterion:backward(outputs, batchLabels)
      mlp:backward(batchInputs, dloss_doutputs)
      return loss, gradParams
   end
   optim.adam(feval, params, optimState) -- one Adam step on the full batch
end
print("\n---Tests---\n")
x = torch.Tensor(2)
x[1] = 1; x[2] = 1; print("Input: ", x[1], ", ", x[2]); print("Output: ", mlp:forward(x))
x[1] = 1; x[2] = 0; print("Input: ", x[1], ", ", x[2]); print("Output: ", mlp:forward(x))
x[1] = 0; x[2] = 1; print("Input: ", x[1], ", ", x[2]); print("Output: ", mlp:forward(x))
x[1] = 0; x[2] = 0; print("Input: ", x[1], ", ", x[2]); print("Output: ", mlp:forward(x))
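-- after training, the output should be close to 1 for inputs (1,0) and (0,1),
-- and close to 0 for (1,1) and (0,0)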