From 8267f41dca1c32a162a3b8e4bacb4d436c2da4d9 Mon Sep 17 00:00:00 2001
From: "Anthony D. Blaom"
Date: Tue, 28 Jan 2020 09:56:37 +1300
Subject: [PATCH 1/4] minor refactor of learning_curve

---
 src/learning_curves.jl | 67 +++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 37 deletions(-)

diff --git a/src/learning_curves.jl b/src/learning_curves.jl
index e8e0d14..623b79d 100644
--- a/src/learning_curves.jl
+++ b/src/learning_curves.jl
@@ -57,29 +57,37 @@ plot!(curves.parameter_values,
       curves.measurements,
       xlab=curves.parameter_name,
       ylab="Holdout estimate of RMS error")
+
+
 ```
+    learning_curve(model::Supervised, X, y; kwargs...)
+    learning_curve(model::Supervised, X, y, w; kwargs...)
+
+Plot a learning curve (or curves) directly, without first constructing
+a machine.

 """
-function learning_curve(mach::Machine{<:Supervised};
-                        resolution=30,
-                        resampling=Holdout(),
-                        weights=nothing,
-                        measure=nothing,
-                        operation=predict,
-                        range::Union{Nothing,ParamRange}=nothing,
-                        repeats=1,
-                        acceleration=default_resource(),
-                        acceleration_grid=CPU1(),
-                        verbosity=1,
-                        rngs=nothing,
-                        rng_name=nothing,
-                        check_measure=true)
-
-    if measure == nothing
-        measure = default_measure(mach.model)
-        verbosity < 1 ||
-            @info "No measure specified. Using measure=$measure. "
-    end
+learning_curve(mach::Machine{<:Supervised}; kwargs...) =
+    learning_curve(mach.model, mach.args...; kwargs...)
+
+# for backwards compatibility
+learning_curve!(mach::Machine{<:Supervised}; kwargs...) =
+    learning_curve(mach; kwargs...)
+
+function learning_curve(model::Supervised, args...;
+                        resolution=30,
+                        resampling=Holdout(),
+                        weights=nothing,
+                        measure=nothing,
+                        operation=predict,
+                        range::Union{Nothing,ParamRange}=nothing,
+                        repeats=1,
+                        acceleration=default_resource(),
+                        acceleration_grid=CPU1(),
+                        verbosity=1,
+                        rngs=nothing,
+                        rng_name=nothing,
+                        check_measure=true)

     range !== nothing || error("No param range specified. Use range=... ")

@@ -97,7 +105,7 @@ function learning_curve(mach::Machine{<:Supervised};
         end
     end

-    tuned_model = TunedModel(model=mach.model,
+    tuned_model = TunedModel(model=model,
                              range=range,
                              tuning=Grid(resolution=resolution,
                                          shuffle=false),
@@ -109,7 +117,7 @@
                              repeats=repeats,
                              acceleration=acceleration_grid)

-    tuned = machine(tuned_model, mach.args...)
+    tuned = machine(tuned_model, args...)

     results = _tuning_results(rngs, acceleration, tuned, rng_name, verbosity)

@@ -176,18 +184,3 @@ function _tuning_results(rngs::AbstractVector, acceleration::CPUProcesses,
     return ret
 end

-learning_curve!(machine::Machine, args...) =
-    learning_curve(machine, args...)
-
-"""
-    learning_curve(model::Supervised, args...; kwargs...)
-
-Plot a learning curve (or curves) without first constructing a
-machine. Equivalent to `learing_curve(machine(model, args...);
-kwargs...)
-
-See [learning_curve](@ref)
-
-"""
-learning_curve(model::Supervised, args...; kwargs...) =
-    learning_curve(machine(model, args...); kwargs...)
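To make the refactor concrete, here is a minimal sketch of the two call styles this patch makes equivalent. The data loader, atomic model, and hyperparameter used below (`@load_boston`, `RidgeRegressor`, `n`) are illustrative assumptions, not taken from the patch; any `Supervised` model with compatible data will do.

```julia
using MLJ

X, y = @load_boston                                 # illustrative dataset
atom = @load RidgeRegressor pkg=MultivariateStats   # illustrative atomic model
ensemble = EnsembleModel(atom=atom, n=20)
r_n = range(ensemble, :n, lower=1, upper=50)        # hyperparameter range to sweep

# existing style: construct a machine first
mach = machine(ensemble, X, y)
curves = learning_curve(mach; range=r_n, resampling=Holdout(), measure=rms)

# style added by this patch: pass the model and data directly
curves = learning_curve(ensemble, X, y;
                        range=r_n, resampling=Holdout(), measure=rms)

# plot the returned named tuple, as in the docstring
using Plots
plot(curves.parameter_values, curves.measurements,
     xlab=curves.parameter_name, ylab="Holdout estimate of RMS error")
```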
From 13ee44b6431436d60d3680bae558cff724e0423e Mon Sep 17 00:00:00 2001
From: "Anthony D. Blaom"
Date: Tue, 28 Jan 2020 10:09:20 +1300
Subject: [PATCH 2/4] remove redundant code

---
 src/learning_curves.jl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/learning_curves.jl b/src/learning_curves.jl
index 623b79d..dd96010 100644
--- a/src/learning_curves.jl
+++ b/src/learning_curves.jl
@@ -125,7 +125,6 @@ function learning_curve(model::Supervised, args...;
     parameter_scale=results.parameter_scales[1]
     parameter_values=[results.parameter_values[:, 1]...]
     measurements = results.measurements
-    measurements = (rngs == nothing) ? [measurements...] : measurements

     return (parameter_name=parameter_name,
             parameter_scale=parameter_scale,

From 5c8115ce046757870b10ff7c6c9dba43ecf2fcc7 Mon Sep 17 00:00:00 2001
From: "Anthony D. Blaom"
Date: Tue, 28 Jan 2020 11:59:37 +1300
Subject: [PATCH 3/4] add tests; rm redundant code

---
 src/inhomogeneous_list.jl | 0
 src/learning_curves.jl    | 6 ++++--
 test/learning_curves.jl   | 8 ++++++++
 3 files changed, 12 insertions(+), 2 deletions(-)
 delete mode 100644 src/inhomogeneous_list.jl

diff --git a/src/inhomogeneous_list.jl b/src/inhomogeneous_list.jl
deleted file mode 100644
index e69de29..0000000
diff --git a/src/learning_curves.jl b/src/learning_curves.jl
index dd96010..4ca1bef 100644
--- a/src/learning_curves.jl
+++ b/src/learning_curves.jl
@@ -78,9 +78,11 @@ function learning_curve(model::Supervised, args...;
                         resolution=30,
                         resampling=Holdout(),
                         weights=nothing,
-                        measure=nothing,
+                        measures=nothing,
+                        measure=measures,
                         operation=predict,
-                        range::Union{Nothing,ParamRange}=nothing,
+                        ranges::Union{Nothing,ParamRange}=nothing,
+                        range::Union{Nothing,ParamRange}=ranges,
                         repeats=1,
                         acceleration=default_resource(),
                         acceleration_grid=CPU1(),
diff --git a/test/learning_curves.jl b/test/learning_curves.jl
index a6278bf..cb8fed3 100644
--- a/test/learning_curves.jl
+++ b/test/learning_curves.jl
@@ -66,6 +66,14 @@ y = 2*x1 .+ 5*x2 .- 3*x3 .+ 0.2*rand(100);
                             rng_name=:rng)
     @test curves2.measurements ≈ curves.measurements

+    # alternative signature:
+    curves3 = learning_curve(ensemble, X, y; range=r_n, resolution=7,
+                             acceleration=accel,
+                             rngs = 3,
+                             rng_name=:rng)
+
+    @test curves2.measurements ≈ curves3.measurements
+
 end
 end # module
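The keyword aliases added above (`measures` for `measure`, `ranges` for `range`) and the `rngs`/`rng_name` replication exercised by the new test can be combined as in the following sketch, which assumes the `ensemble`, `X`, `y`, and `r_n` of the earlier sketch, and that `:rng` names an RNG hyperparameter of the model:

```julia
curves3 = learning_curve(ensemble, X, y;
                         ranges=r_n,       # synonym for range=r_n
                         measures=rms,     # synonym for measure=rms
                         resolution=7,
                         rngs=3,           # repeat the sweep for 3 RNGs ...
                         rng_name=:rng)    # ... stored in ensemble.rng

# with `rngs` specified, `measurements` should hold one curve (column)
# per RNG, which is what the new test relies on
size(curves3.measurements)   # (7, 3): resolution rows, one column per RNG
```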
From eb8c9e0d6cae6a62f0ebe0009b76b07363f2676d Mon Sep 17 00:00:00 2001
From: "Anthony D. Blaom"
Date: Tue, 28 Jan 2020 12:12:29 +1300
Subject: [PATCH 4/4] readme corrections

---
 README.md | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index d134a5c..24591d3 100644
--- a/README.md
+++ b/README.md
@@ -22,11 +22,12 @@ MLJ user. Rather, MLJTuning is a dependency of the
 learning platform, which allows MLJ users to perform a variety of
 hyperparameter optimization tasks from there.

-MLJTUning is the place for developers to integrate hyperparameter
+MLJTuning is the place for developers to integrate hyperparameter
 optimization algorithms (here called *tuning strategies*) into MLJ,
 either by adding code to [/src/strategies](/src/strategies), or by
-importing MLJTuning into a third-pary package and and implementing
-MLJTuning's interface.
+importing MLJTuning into a third-party package and implementing
+MLJTuning's [tuning strategy
+interface](#implementing-a-new-tuning-strategy).

 MLJTuning is a component of the [MLJ
 stack](https://github.com/alan-turing-institute/MLJ.jl#the-mlj-universe)
@@ -49,9 +50,10 @@ This repository contains:
   hyperparameters (using cross-validation or other resampling
   strategy) before training the optimal model on all supplied data

-- an abstract **tuning strategy interface** to allow developers to
-  conveniently implement common hyperparameter optimization
-  strategies, such as:
+- an abstract **[tuning strategy
+  interface](#implementing-a-new-tuning-strategy)** to allow
+  developers to conveniently implement common hyperparameter
+  optimization strategies, such as:

   - [x] search a list of explicitly specified models `list = [model1,
     model2, ...]`
@@ -100,7 +102,7 @@ elaboration on those terms given in *italics*.

 All tuning in MLJ is conceptualized as an iterative procedure, each
 iteration corresponding to a performance *evaluation* of a single
-*model*. Each such model is a mutation of a fixed *prototype*. In the
+*model*. Each such model is a mutated clone of a fixed prototype. In the
 general case, this prototype is a composite model, i.e., a model with
 other models as hyperparameters, and while the type of the prototype
 mutations is fixed, the types of the sub-models are allowed to vary.
@@ -293,7 +295,7 @@ preferred "central value". These default to `(upper - lower)/2` and
 `(upper + lower)/2`, respectively, in the bounded case (neither
 `upper = Inf` nor `lower = -Inf`). The fields `origin` and `unit` are
 used in generating grids for unbounded ranges (and could be used in other
-strategies for fitting two-parameter probability distributions, for
+strategies - for fitting two-parameter probability distributions, for
 example).

 A `ParamRange` object is always associated with the name of a
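As a postscript to the last hunk: a sketch of the `origin` and `unit` semantics described there, using MLJBase's `range` constructor and the `ensemble` model from the earlier sketches. The unbounded-range keywords are an assumption based on the README text, not something exercised by these patches.

```julia
# bounded case: `origin` and `unit` take the documented defaults
r = range(ensemble, :n, lower=1, upper=50)
r.origin    # (upper + lower)/2 = 25.5, the preferred "central value"
r.unit      # (upper - lower)/2 = 24.5, the preferred length scale

# unbounded case: `origin` and `unit` must be given explicitly, so that
# grid generation (or fitting a two-parameter distribution) has a scale
r_unbounded = range(ensemble, :n, lower=1, upper=Inf, origin=10, unit=5)
```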