Skip to content

Commit

Permalink
Merge branch 'JuliaAI:dev' into dev
Browse files Browse the repository at this point in the history
  • Loading branch information
pebeto authored Mar 7, 2024
2 parents 4b3a32e + da15fb3 commit f4085d7
Show file tree
Hide file tree
Showing 14 changed files with 110 additions and 32 deletions.
8 changes: 8 additions & 0 deletions .github/codecov.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
coverage:
status:
project:
default:
threshold: 0.5%
patch:
default:
target: 80%
7 changes: 5 additions & 2 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@ jobs:
env:
JULIA_NUM_THREADS: '2'
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v1
- uses: codecov/codecov-action@v3
with:
file: lcov.info
token: 080b3e97-0ae1-4282-b626-bcc1a93d158c
files: lcov.info
fail_ci_if_error: false
verbose: true
10 changes: 6 additions & 4 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "MLJTuning"
uuid = "03970b2e-30c4-11ea-3135-d1576263f10f"
authors = ["Anthony D. Blaom <[email protected]>"]
version = "0.7.4"
version = "0.8.1"

[deps]
ComputationalResources = "ed09eef8-17a6-5b46-8889-db040fac31e3"
Expand All @@ -12,14 +12,16 @@ MLJBase = "a7f614a8-145f-11e9-1d2a-a57a1082229d"
ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
StatisticalMeasuresBase = "c062fc1d-0d66-479b-b6ac-8b44719de4cc"

[compat]
ComputationalResources = "0.3"
Distributions = "0.22,0.23,0.24, 0.25"
LatinHypercubeSampling = "1.7.2"
MLJBase = "0.20, 0.21"
MLJBase = "1"
ProgressMeter = "1.7.1"
RecipesBase = "0.8,0.9,1"
StatisticalMeasuresBase = "0.1.1"
julia = "1.6"

[extras]
Expand All @@ -35,11 +37,11 @@ NearestNeighbors = "b8a86587-4115-5ab1-83bc-aa920d37bbce"
ScientificTypes = "321657f4-b219-11e9-178b-2701a2544e81"
Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
StatisticalMeasures = "a19d573c-0a75-4610-95b3-7071388c7541"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["CategoricalArrays", "ComputationalResources", "DecisionTree", "Distances", "Distributed", "LinearAlgebra", "MLJModelInterface", "MultivariateStats", "NearestNeighbors", "ScientificTypes", "Serialization", "StableRNGs", "Statistics", "StatsBase", "Tables", "Test"]

test = ["CategoricalArrays", "ComputationalResources", "DecisionTree", "Distances", "Distributed", "LinearAlgebra", "MLJModelInterface", "MultivariateStats", "NearestNeighbors", "ScientificTypes", "Serialization", "StableRNGs", "StatisticalMeasures", "Statistics", "StatsBase", "Tables", "Test"]
1 change: 1 addition & 0 deletions src/MLJTuning.jl
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ using MLJBase
import MLJBase: Bounded, Unbounded, DoublyUnbounded,
LeftUnbounded, RightUnbounded, _process_accel_settings, chunks,
restore, save
import StatisticalMeasuresBase as SMB
using RecipesBase
using Distributed
import Distributions
Expand Down
2 changes: 1 addition & 1 deletion src/plotrecipes.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
@recipe function f(mach::MLJBase.Machine{<:EitherTunedModel})
rep = report(mach)
measurement = string(typeof(rep.best_history_entry.measure[1]))
measurement = repr(rep.best_history_entry.measure[1])
r = rep.plotting
z = r.measurements
X = r.parameter_values
Expand Down
30 changes: 24 additions & 6 deletions src/strategies/explicit.jl
Original file line number Diff line number Diff line change
@@ -1,15 +1,22 @@
const WARN_INCONSISTENT_PREDICTION_TYPE =
"Not all models to be evaluated have the same prediction type, and this may "*
"cause problems for some measures. For example, a probabilistic metric "*
"like `log_loss` cannot be applied to a model making point (deterministic) "*
"predictions. Inspect the prediction type with "*
"`prediction_type(model)`. "

mutable struct Explicit <: TuningStrategy end

struct ExplicitState{R, N}
range::R # a model-generating iterator
next::N # to hold output of `iterate(range)`
next::N # to hold output of `iterate(range)`
prediction_type::Symbol
user_warned::Bool
end

ExplicitState(r::R, n::N) where {R,N} = ExplicitState{R, Union{Nothing, N}}(r, n)

function MLJTuning.setup(tuning::Explicit, model, range, n, verbosity)
next = iterate(range)
return ExplicitState(range, next)
return ExplicitState(range, next, MLJBase.prediction_type(model), false)
end

# models! returns as many models as possible but no more than `n_remaining`:
Expand All @@ -20,11 +27,21 @@ function MLJTuning.models(tuning::Explicit,
n_remaining,
verbosity)

range, next = state.range, state.next
range, next, prediction_type, user_warned =
state.range, state.next, state.prediction_type, state.user_warned

function check(m)
if !user_warned && verbosity > -1 && MLJBase.prediction_type(m) != prediction_type
@warn WARN_INCONSISTENT_PREDICTION_TYPE
user_warned = true
end
end

next === nothing && return nothing, state

m, s = next
check(m)

models = Any[m, ] # types not known until run-time

next = iterate(range, s)
Expand All @@ -33,12 +50,13 @@ function MLJTuning.models(tuning::Explicit,
while i < n_remaining
next === nothing && break
m, s = next
check(m)
push!(models, m)
i += 1
next = iterate(range, s)
end

new_state = ExplicitState(range, next)
new_state = ExplicitState(range, next, prediction_type, user_warned)

return models, new_state

Expand Down
8 changes: 5 additions & 3 deletions src/tuned_models.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
const ERR_SPECIFY_MODEL = ArgumentError(
"You need to specify `model=...`, unless `tuning=Explicit()`. ")
const ERR_SPECIFY_RANGE = ArgumentError(
"You need to specify `range=...`, unless `tuning=Explicit` and "*
"You need to specify `range=...`, unless `tuning=Explicit()` and "*
"`models=...` is specified instead. ")
const ERR_SPECIFY_RANGE_OR_MODELS = ArgumentError(
"No `model` specified. Either specify an explicit iterator "*
Expand All @@ -12,7 +12,7 @@ const ERR_SPECIFY_RANGE_OR_MODELS = ArgumentError(
const ERR_NEED_EXPLICIT = ArgumentError(
"You have specified an explicit "*
"iterator `models` of MLJModels and so cannot "*
"specify any `tuning` strategy except `Explicit`. Either omit the "*
"specify any `tuning` strategy except `Explicit()`. Either omit the "*
"`tuning=...` specification, or specify a *single* model using "*
"`model=...` instead. ")
const ERR_BOTH_DISALLOWED = ArgumentError(
Expand Down Expand Up @@ -438,9 +438,11 @@ function event!(metamodel,
state)
model = _first(metamodel)
metadata = _last(metamodel)
force = typeof(resampling_machine.model.model) !=
typeof(model)
resampling_machine.model.model = model
verb = (verbosity >= 2 ? verbosity - 3 : verbosity - 1)
fit!(resampling_machine, verbosity=verb)
fit!(resampling_machine; verbosity=verb, force)
E = evaluate(resampling_machine)
entry0 = (model = model,
measure = E.measure,
Expand Down
5 changes: 3 additions & 2 deletions src/utilities.jl
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,11 @@ function delete(nt::NamedTuple, target_keys...)
return (; filtered...)
end

# `SMB` is alias for `StatisticalMeasuresBase`
signature(measure) =
if orientation(measure) == :loss
if SMB.orientation(measure) == SMB.Loss()
1
elseif orientation(measure) == :score
elseif SMB.orientation(measure) == SMB.Score()
-1
else
0
Expand Down
1 change: 1 addition & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ addprocs(2)
using Test
using MLJTuning
using MLJBase
using StatisticalMeasures
using StableRNGs

# Display Number of processes and if necessary number
Expand Down
39 changes: 39 additions & 0 deletions test/strategies/explicit.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
good = KNNClassifier(K=2)
bad = KNNClassifier(K=10)
ugly = ConstantClassifier()
evil = DeterministicConstantClassifier()

r = [good, bad, ugly]

Expand Down Expand Up @@ -44,4 +45,42 @@ X, y = make_blobs(rng=rng)
@test_throws ArgumentError TunedModel(; models=[dcc, dcc])
end

r = [good, bad, evil, ugly]

@testset "inconsistent prediction types" begin
# case where different predictions types is actually okay (but still
# a warning is issued):
tmodel = TunedModel(
models=r,
resampling = Holdout(),
measure=accuracy,
)
@test_logs(
(:warn, MLJTuning.WARN_INCONSISTENT_PREDICTION_TYPE),
MLJBase.fit(tmodel, 0, X, y),
);

# verbosity = -1 suppresses the warning:
@test_logs(
MLJBase.fit(tmodel, -1, X, y),
);

# case where there really is a problem with different prediction types:
tmodel = TunedModel(
models=r,
resampling = Holdout(),
measure=log_loss,
)
@test_logs(
(:warn, MLJTuning.WARN_INCONSISTENT_PREDICTION_TYPE),
(:error,),
(:info,),
(:info,),
@test_throws(
ArgumentError, # indicates the problem is with incompatible measure
MLJBase.fit(tmodel, 0, X, y),
)
)
end

true
1 change: 1 addition & 0 deletions test/strategies/grid.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ module TestGrid

using Test
using MLJBase
using StatisticalMeasures
using MLJTuning
# include("../test/models.jl")
# using .Models
Expand Down
1 change: 1 addition & 0 deletions test/strategies/latin_hypercube.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ module TestLatinHypercube
using Test
using MLJBase
using MLJTuning
using StatisticalMeasures
using LatinHypercubeSampling
import Distributions
import Random
Expand Down
1 change: 1 addition & 0 deletions test/strategies/random_search.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ module TestRandomSearch

using Test
using MLJBase
using StatisticalMeasures
using MLJTuning
import Distributions
import Random
Expand Down
28 changes: 14 additions & 14 deletions test/tuned_models.jl
Original file line number Diff line number Diff line change
Expand Up @@ -29,41 +29,41 @@ r = [m(K) for K in 13:-1:2]

@testset "constructor" begin
@test_throws(MLJTuning.ERR_SPECIFY_RANGE,
TunedModel(model=first(r), tuning=Grid(), measure=rms))
TunedModel(model=first(r), tuning=Grid(), measure=l2))
@test_throws(MLJTuning.ERR_SPECIFY_RANGE,
TunedModel(model=first(r), measure=rms))
TunedModel(model=first(r), measure=l2))
@test_throws(MLJTuning.ERR_BOTH_DISALLOWED,
TunedModel(model=first(r),
models=r, tuning=Explicit(), measure=rms))
tm = @test_logs TunedModel(models=r, tuning=Explicit(), measure=rms)
models=r, tuning=Explicit(), measure=l2))
tm = @test_logs TunedModel(models=r, tuning=Explicit(), measure=l2)
@test tm.tuning isa Explicit && tm.range ==r && tm.model == first(r)
@test input_scitype(tm) == Unknown
@test TunedModel(models=r, measure=rms) == tm
@test TunedModel(models=r, measure=l2) == tm
@test_logs (:info, r"No measure") @test TunedModel(models=r) == tm

@test_throws(MLJTuning.ERR_SPECIFY_MODEL,
TunedModel(range=r, measure=rms))
TunedModel(range=r, measure=l2))
@test_throws(MLJTuning.ERR_MODEL_TYPE,
TunedModel(model=42, tuning=Grid(),
range=r, measure=rms))
range=r, measure=l2))
@test_logs (:info, MLJTuning.INFO_MODEL_IGNORED) tm =
TunedModel(model=42, tuning=Explicit(), range=r, measure=rms)
TunedModel(model=42, tuning=Explicit(), range=r, measure=l2)
@test_logs (:info, r"No measure") tm =
TunedModel(model=first(r), range=r)
@test_throws(MLJTuning.ERR_SPECIFY_RANGE_OR_MODELS,
TunedModel(tuning=Explicit(), measure=rms))
TunedModel(tuning=Explicit(), measure=l2))
@test_throws(MLJTuning.ERR_NEED_EXPLICIT,
TunedModel(models=r, tuning=Grid()))
@test_logs TunedModel(first(r), range=r, measure=rms)
@test_logs TunedModel(first(r), range=r, measure=l2)
@test_logs(
(:warn, MLJTuning.warn_double_spec(first(r), last(r))),
TunedModel(first(r), model=last(r), range=r, measure=rms),
TunedModel(first(r), model=last(r), range=r, measure=l2),
)
@test_throws(
MLJTuning.ERR_TOO_MANY_ARGUMENTS,
TunedModel(first(r), last(r), range=r, measure=rms),
TunedModel(first(r), last(r), range=r, measure=l2),
)
tm = @test_logs TunedModel(model=first(r), range=r, measure=rms)
tm = @test_logs TunedModel(model=first(r), range=r, measure=l2)
@test tm.tuning isa RandomSearch
@test input_scitype(tm) == Table(Continuous)

Expand All @@ -79,7 +79,7 @@ results = [(evaluate(model, X, y,
tm = TunedModel(
models=r,
resampling=CV(nfolds=2),
measures=cross_entropy
measures=cross_entropy,
)
@test_logs((:error, r"Problem"),
(:info, r""),
Expand Down

0 comments on commit f4085d7

Please sign in to comment.