add joss paper.md and paper.bib
josemanuel22 committed Aug 2, 2024
1 parent 56f7980 commit 4df83a4
Showing 1 changed file with 0 additions and 84 deletions.
84 changes: 0 additions & 84 deletions src/CustomLossFunction.jl
@@ -222,29 +222,6 @@ function invariant_statistical_loss(nn_model, data, hparams)
return losses
end;

#=
function invariant_statistical_loss_1(nn_model, loader, hparams)
    @assert loader.batchsize == hparams.samples
    @assert length(loader) == hparams.epochs
    losses = []
    optim = Flux.setup(Flux.Adam(hparams.η), nn_model)
    @showprogress for data in loader
        loss, grads = Flux.withgradient(nn_model) do nn
            aₖ = zeros(hparams.K + 1)
            for i in 1:(hparams.samples)
                x = rand(hparams.transform, hparams.K)
                yₖ = nn(x')
                aₖ += generate_aₖ(yₖ, data[i])
            end
            scalar_diff(aₖ ./ sum(aₖ))
        end
        Flux.update!(optim, nn_model, grads[1])
        push!(losses, loss)
    end
    return losses
end;
=#

"""
AutoISLParams
@@ -343,67 +320,6 @@ function auto_invariant_statistical_loss(nn_model, data, hparams)
return losses
end;

#=
function auto_invariant_statistical_loss_2(nn_model, data, hparams)
    @assert length(data) == hparams.samples
    K = 2
    @debug "K value set to $K."
    losses = []
    optim = Flux.setup(Flux.Adam(hparams.η), nn_model)
    @showprogress for _ in 1:(hparams.epochs)
        K̂ = get_better_K(nn_model, data, K, hparams)
        if K < K̂
            K = K̂
            @debug "K value set to $K."
        end
        loss, grads = Flux.withgradient(nn_model) do nn
            aₖ = zeros(K + 1)
            for i in 1:(hparams.samples)
                x = rand(hparams.transform, K)
                yₖ = nn(x')
                aₖ += generate_aₖ(yₖ, data.data[i])
            end
            scalar_diff(aₖ ./ sum(aₖ))
        end
        Flux.update!(optim, nn_model, grads[1])
        push!(losses, loss)
    end
    return losses
end;
=#

#=
function auto_invariant_statistical_loss_1(nn_model, loader, hparams)
    @assert loader.batchsize == hparams.samples
    @assert length(loader) == hparams.epochs
    K = 2
    @debug "K value set to $K."
    losses = []
    optim = Flux.setup(Flux.Adam(hparams.η), nn_model)
    @showprogress for data in loader
        K̂ = get_better_K(nn_model, data, K, hparams)
        if K < K̂
            K = K̂
            @debug "K value set to $K."
        end
        loss, grads = Flux.withgradient(nn_model) do nn
            aₖ = zeros(K + 1)
            for i in 1:(hparams.samples)
                x = rand(hparams.transform, K)
                yₖ = nn(x')
                aₖ += generate_aₖ(yₖ, data[i])
            end
            scalar_diff(aₖ ./ sum(aₖ))
        end
        Flux.update!(optim, nn_model, grads[1])
        push!(losses, loss)
    end
    return losses
end;
=#

# Hyperparameters for the method `ts_adaptative_block_learning`
"""
HyperParamsTS
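All three removed variants implement the same ISL training loop as the retained invariant_statistical_loss / auto_invariant_statistical_loss functions: draw K samples from hparams.transform, push them through the model, accumulate the rank counts aₖ with generate_aₖ, and take an Adam step on scalar_diff of the normalized counts. Below is a minimal self-contained sketch of that loop; hyperparameter names mirror the deleted code, generate_aₖ and scalar_diff are assumed to be provided by this package, and the Normal target distribution is a hypothetical example, not part of the original file.

using Flux, Distributions, ProgressMeter
# generate_aₖ and scalar_diff are assumed to be in scope from this package.

nn_model = Chain(Dense(1 => 32, relu), Dense(32 => 1))  # toy 1-D generator
η, K, samples, epochs = 1e-3, 10, 1000, 100             # mirrors the hparams fields above
transform = Normal(0.0f0, 1.0f0)                        # noise distribution fed to the model
target = Normal(4.0f0, 2.0f0)                           # hypothetical data distribution

losses = []
optim = Flux.setup(Flux.Adam(η), nn_model)
@showprogress for _ in 1:epochs
    data = rand(target, samples)
    loss, grads = Flux.withgradient(nn_model) do nn
        aₖ = zeros(K + 1)
        for i in 1:samples
            x = rand(transform, K)          # K noise draws per observation
            yₖ = nn(x')                     # model pushforward, 1×K output
            aₖ += generate_aₖ(yₖ, data[i])  # rank-statistic counts
        end
        scalar_diff(aₖ ./ sum(aₖ))          # deviation from a uniform histogram
    end
    Flux.update!(optim, nn_model, grads[1])
    push!(losses, loss)
end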
