Mix.install([
  {:exla, "~> 0.1.0-dev", github: "elixir-nx/nx", sparse: "exla"},
  {:nx, "~> 0.1.0-dev", github: "elixir-nx/nx", sparse: "nx", override: true},
  {:axon, "~> 0.1.0-dev", github: "elixir-nx/axon"}
])
Load the data from a binary file and perform some preprocessing. The file starts with a header of four 32-bit integers, followed by the raw pixel data:
<<magic number::32, number of images::32, height::32, width::32, data::binary>>
{:ok, train_images_bin} = File.read("elixir-ml-example/x-ray-train-ubyte")

<<_::32, n_train::32, n_rows::32, n_cols::32, train_data::binary>> = train_images_bin

x_train =
  train_data
  |> Nx.from_binary({:u, 8})
  # NCHW layout: {batch, channels, height, width}
  |> Nx.reshape({n_train, 1, n_rows, n_cols})
  # scale pixel values into [0, 1]
  |> Nx.divide(255)
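As a quick optional sanity check, note that Nx.divide/2 promotes the u8 tensor to a float type, so the shape and type can be verified directly:
# The division above yields a float tensor of shape {n_train, 1, n_rows, n_cols}
IO.inspect(Nx.shape(x_train), label: "x_train shape")
IO.inspect(Nx.type(x_train), label: "x_train type")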
The labels file has the same structure with a shorter header:
<<magic number::32, number of labels::32, data::binary>>
{:ok, train_labels_bin} = File.read("elixir-ml-example/x-ray-train-labels-ubyte")

<<_::32, _n_labels::32, train_label::binary>> = train_labels_bin

y_train =
  train_label
  |> Nx.from_binary({:u, 8})
  |> Nx.reshape({n_train, 1})
  # one-hot encode by comparing each label against the class vector [0, 1]
  |> Nx.equal(Nx.tensor([0, 1]))
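The Nx.equal/2 trick works through broadcasting: an {n, 1} column compared against a {2} vector yields an {n, 2} matrix. A minimal illustration with made-up labels:
# A {3, 1} label column against [0, 1] broadcasts to a {3, 2} one-hot matrix
Nx.tensor([[0], [1], [1]])
|> Nx.equal(Nx.tensor([0, 1]))
# => [[1, 0], [0, 1], [0, 1]]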
We have to check that the training set size is divisible by the batch size without remainder, because Nx.to_batched_list/2 pads an incomplete final batch by wrapping around to the start of the data, which would feed duplicated samples into training.
batch_size = 8

if rem(n_train, batch_size) != 0 do
  raise "Incorrect batch size"
end

batch_size
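An alternative to raising here is to drop the leftover samples before batching; a minimal sketch (n_keep is a name introduced for illustration, not from the original):
# Hypothetical alternative: trim the dataset to a multiple of batch_size
n_keep = n_train - rem(n_train, batch_size)
x_train = Nx.slice(x_train, [0, 0, 0, 0], [n_keep, 1, n_rows, n_cols])
y_train = Nx.slice(y_train, [0, 0], [n_keep, 2])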
Create the training batches
x = Nx.to_batched_list(x_train, batch_size)
y = Nx.to_batched_list(y_train, batch_size)
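Each element of the resulting lists carries the batch dimension first; a quick optional check:
# Expect {8, 1, n_rows, n_cols} for images and {8, 2} for labels
x |> hd() |> Nx.shape() |> IO.inspect(label: "image batch")
y |> hd() |> Nx.shape() |> IO.inspect(label: "label batch")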
Model architecture
- looks similar to what you would write in Keras
require Axon

model =
  Axon.input({nil, 1, n_rows, n_cols})
  # four convolutional blocks with dropout and downsampling
  |> Axon.conv(16, kernel_size: {3, 3}, strides: 1, padding: :same, activation: :relu)
  |> Axon.max_pool(kernel_size: {2, 2}, strides: 2, padding: :same)
  |> Axon.conv(32, kernel_size: {3, 3}, strides: 1, padding: :same, activation: :relu)
  |> Axon.dropout(rate: 0.1)
  |> Axon.max_pool(kernel_size: {2, 2}, strides: 2, padding: :same)
  |> Axon.conv(32, kernel_size: {3, 3}, strides: 1, padding: :same, activation: :relu)
  |> Axon.dropout(rate: 0.1)
  |> Axon.max_pool(kernel_size: {2, 2}, strides: 2, padding: :same)
  |> Axon.conv(32, kernel_size: {3, 3}, strides: 1, padding: :same, activation: :relu)
  |> Axon.dropout(rate: 0.2)
  |> Axon.max_pool(kernel_size: {2, 2}, strides: 2, padding: :same)
  # dense classifier head with two output classes
  |> Axon.flatten()
  |> Axon.dense(16, activation: :relu)
  |> Axon.dropout(rate: 0.1)
  |> Axon.dense(8, activation: :relu)
  |> Axon.dense(2, activation: :softmax)
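Axon models implement the Inspect protocol, so printing the model shows a layer-by-layer summary (the exact output depends on the Axon version):
# Print a summary of layers and output shapes
IO.inspect(model)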
Train and save the model
- the trained weights and the architecture are saved as separate binary files
epochs = 30

trained_model =
  model
  |> Axon.Loop.trainer(:categorical_cross_entropy, Axon.Optimizers.adamw(5.0e-6))
  |> Axon.Loop.run(Stream.zip(x, y), epochs: epochs, compiler: EXLA)
filename_w = "elixir-ml-example/x_ray_model_cnn_weights"
filename_m = "elixir-ml-example/x_ray_model_cnn_model"

# serialize the trained parameters and the model definition to disk
trained_model_binary = :erlang.term_to_binary(trained_model)
File.write!(filename_w, trained_model_binary)

model_binary = :erlang.term_to_binary(model)
File.write!(filename_m, model_binary)
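To use the saved model later, reverse the serialization; a minimal sketch, assuming the files above exist and the batches x are still in scope:
# Restore the architecture and trained parameters from disk
loaded_model = filename_m |> File.read!() |> :erlang.binary_to_term()
loaded_params = filename_w |> File.read!() |> :erlang.binary_to_term()

# Smoke test: run inference on the first training batch
Axon.predict(loaded_model, loaded_params, hd(x))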