diff --git a/REQUIRE b/REQUIRE
index 4345298..9ef251d 100644
--- a/REQUIRE
+++ b/REQUIRE
@@ -1,5 +1,6 @@
-julia 0.6 0.7-
+julia 0.7
 Ipopt
 Clp
-JuMP 0.15 0.18.5-
-Distributions
\ No newline at end of file
+JuMP 0.18 0.19-
+Distributions
+MathProgBase
\ No newline at end of file
diff --git a/examples/2d_example.jl b/examples/2d_example.jl
index b9409b0..8c36667 100644
--- a/examples/2d_example.jl
+++ b/examples/2d_example.jl
@@ -1,6 +1,4 @@
-using JuMP
-using FlexJuMP
-using Gurobi
+using JuMP, FlexJuMP, Gurobi
 
 # Set the covariance matrix for the uncertain parameters
 means = [4.0; 5.0]
diff --git a/examples/3node_distribution.jl b/examples/3node_distribution.jl
index 784c911..0a1c5a1 100644
--- a/examples/3node_distribution.jl
+++ b/examples/3node_distribution.jl
@@ -1,6 +1,7 @@
 using JuMP
 using FlexJuMP
 using Gurobi
+using LinearAlgebra
 
 # Set the dimensions
 n_lines = 2
diff --git a/examples/IEEE_14.jl b/examples/IEEE_14.jl
index 6bfc7c0..2637827 100644
--- a/examples/IEEE_14.jl
+++ b/examples/IEEE_14.jl
@@ -2,6 +2,7 @@ using JuMP
 using FlexJuMP
 using Gurobi
 using Pavito, Ipopt
+using LinearAlgebra
 
 # Set the dimensions
 n_gens = 5
@@ -10,7 +11,7 @@ n_dems = 11
 
 # Setup the uncertainty set parameters
 β = 240.
-covar = eye(n_dems) * 1200.
+covar = Matrix(I, n_dems, n_dems) * 1200.
 covar[covar .== 0] = β
 box_dev = ones(n_dems) * 2 * sqrt(covar[1])
 
diff --git a/src/FlexJuMP.jl b/src/FlexJuMP.jl
index 486ed9c..5fbc88e 100644
--- a/src/FlexJuMP.jl
+++ b/src/FlexJuMP.jl
@@ -30,6 +30,8 @@ module FlexJuMP
 
 using Distributions
 using JuMP
+using LinearAlgebra
+using Random
 using Ipopt, Clp # Default solver
 
 # Define functions/macros to be readily accesible to the user
diff --git a/src/datatypes.jl b/src/datatypes.jl
index 84bb6d6..8b02397 100644
--- a/src/datatypes.jl
+++ b/src/datatypes.jl
@@ -120,7 +120,7 @@ and `RecourseVariable`), the uncertainty set, and solution results.
 - `recourse_cols::Vector{Int}` The index of each `RecourseVariable`.
 - `uncertainty_set::AbstractUncertaintySet` The uncertainty set DataType with all of the set specfic attributes.
 - `covariance::Matrix{Number}` The covariance matrix.
-- `flexibility_index::Union{Void, Number}` The flexibility index result obtained from solving the flexibility model.
+- `flexibility_index::Union{Nothing, Number}` The flexibility index result obtained from solving the flexibility model.
 - `active_constraints::Vector{Int}` The indexes of the active inequality constraints at the solution of the flexibility model.
 - 'solution_time::Number' The solution time in seconds.
 """
@@ -141,9 +141,9 @@ mutable struct FlexibilityData
     # Various formulation data/results
     uncertainty_set::AbstractUncertaintySet #e.g. ellipsoidal, 1-norm, 2-norm, etc...
     covariance::Matrix{Number}
-    flexibility_index::Union{Void, Number}
+    flexibility_index::Union{Nothing, Number}
     active_constraints::Vector{Int}
-    solution_time::Union{Void, Number}
+    solution_time::Union{Nothing, Number}
 end
 
 # Set methods to extract covariance information if appropriate
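The example and datatype changes above are the standard Julia 0.7 renames: `eye(n)` is gone in favor of `Matrix(I, n, n)` from the new `LinearAlgebra` stdlib, and `Void` is now `Nothing`. A minimal standalone sketch of both patterns (illustrative names only, not FlexJuMP code; note that the scalar fill on the next context line also needs broadcast assignment on 0.7+):

using LinearAlgebra

n_dems = 11
covar = Matrix(I, n_dems, n_dems) * 1200.0   # replaces eye(n_dems) * 1200.
covar[covar .== 0] .= 240.0                  # .= is required for scalar fills on 0.7+

mutable struct ResultRecord                  # hypothetical struct mirroring the Union fields
    flexibility_index::Union{Nothing, Number}
    solution_time::Union{Nothing, Number}
end
ResultRecord(nothing, nothing)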
diff --git a/src/functions.jl b/src/functions.jl
index a4c46b8..5bce263 100644
--- a/src/functions.jl
+++ b/src/functions.jl
@@ -39,20 +39,20 @@ function rankinequalities(m::Model; max_ranks::Int = 5, suppress_warnings::Bool
 
     # Initialize data
     m_copy = deepcopy(m)
-    ranked_data = Vector{Dict}(max_ranks)
+    ranked_data = Vector{Dict}(undef, max_ranks)
     ranked_constrs = []
     inactives = []
     counter = 0
 
     # Get the inequality information
     constr_bounds = JuMP.prepConstrBounds(m_copy)
-    inequal_inds = find(constr_bounds[1] .!= constr_bounds[2])
+    inequal_inds = findall(constr_bounds[1] .!= constr_bounds[2])
 
     # Iteratively solve the flexibility index problem and extract data
     for i = 1:max_ranks
         status = solve(m_copy, suppress_warnings = suppress_warnings, U = U, diag = diag, active_constr = active_constr, real_recourse_dim = real_recourse_dim, conic_δ = conic_δ, inactives = inactives)
         if status == :Optimal
-            ranked_constrs = [find(inequal_inds .== getactiveconstraints(m_copy)[j])[1] for j = 1:length(getactiveconstraints(m_copy))]
+            ranked_constrs = [findall(inequal_inds .== getactiveconstraints(m_copy)[j])[1] for j = 1:length(getactiveconstraints(m_copy))]
             inactives = unique([inactives; ranked_constrs])
             ranked_data[i] = Dict("flexibility_index" => getflexibilityindex(m_copy), "active_constraints" => getactiveconstraints(m_copy), "model" => m_copy)
             counter += 1
@@ -147,11 +147,11 @@ function findstochasticflexibility(m::Model; num_pts::Int = 10000, toler::Number
 
     # Setup a multivariate normal distribution and Monte Carlo samples
     if seed >= 0
-        srand(seed)
+        Random.seed!(seed)
     end
     d = MvNormal(Vector{Float64}(flex_data.RVmeans), Matrix{Float64}(getcovariance(m)))
     if only_positive
-        samples = Array{Float64}(flex_data.numRVs, num_pts)
+        samples = Array{Float64}(undef, flex_data.numRVs, num_pts)
         not_done = true
         counter = 1
         while not_done
@@ -171,7 +171,7 @@ function findstochasticflexibility(m::Model; num_pts::Int = 10000, toler::Number
     # Determine which points are outside of the set and check for F
     F = getflexibilityindex(m)
     if use_flexibility_index && F == nothing
-        warn("Flexibility index hasn't yet been computed. Setting use_flexibility_index = false.")
+        @warn "Flexibility index hasn't yet been computed. Setting use_flexibility_index = false."
         use_flexibility_index = false
     elseif use_flexibility_index && flex_data.uncertainty_set.name == :Ellipsoid
         inv_covar = inv(getcovariance(m))
@@ -179,7 +179,7 @@
     elseif use_flexibility_index && flex_data.uncertainty_set.name == :PNorm
         outside_set = [norm(samples[:, k] - θ_nom, flex_data.uncertainty_set.p) > F for k = 1:num_pts]
     elseif use_flexibility_index && flex_data.uncertainty_set.name == :Hyperbox
-        outside_set = Vector{Bool}(num_pts)
+        outside_set = Vector{Bool}(undef, num_pts)
         for k = 1:num_pts
             all_inside = true
             for i = 1:flex_data.numRVs
@@ -280,7 +280,7 @@ function findstochasticflexibility(m::Model; num_pts::Int = 10000, toler::Number
         end
     end
     if infeasible_counter != 0
-        warn("Not all scenario subproblems not solved to optimality, estmation of SF might not be correct.")
+        @warn "Not all scenario subproblems were solved to optimality, estimation of SF might not be correct."
     end
     if use_flexibility_index
         return (sum(feasible_results) + num_pts - num_test_pts) / (num_pts - infeasible_counter)
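The `functions.jl` hunks above bundle several 0.7 API renames: uninitialized arrays take `undef`, `find` becomes `findall`, `srand` becomes `Random.seed!`, and `warn(...)` becomes the `@warn` macro. A short self-contained sketch of the same patterns with made-up values:

using Random

Random.seed!(42)                          # was srand(42)
ranked_data = Vector{Dict}(undef, 5)      # was Vector{Dict}(5)
samples = Array{Float64}(undef, 2, 100)   # was Array{Float64}(2, 100)

lower = [0.0, 1.0, 2.0]
upper = [0.0, 5.0, 2.0]
inequal_inds = findall(lower .!= upper)   # was find(...); returns [2]

F = nothing
if F === nothing
    @warn "Flexibility index hasn't yet been computed."   # was warn("...")
end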
diff --git a/src/macros.jl b/src/macros.jl
index adc881b..e98a4bc 100644
--- a/src/macros.jl
+++ b/src/macros.jl
@@ -1,5 +1,5 @@
-using Base.Meta
-if Pkg.installed("JuMP") >= v"0.18.4"
+using Base.Meta, Pkg
+if Pkg.installed()["JuMP"] >= v"0.18.4"
     import JuMP.undef
 end
 
diff --git a/src/mean.jl b/src/mean.jl
index 43aa2cf..6f8a46b 100644
--- a/src/mean.jl
+++ b/src/mean.jl
@@ -132,7 +132,8 @@ function ComputeCenter(m::Model, center::Symbol, solver, toler::Number, only_pos
     # Solve and return
     status = solve(m_solve)
     if center == :feasible && getvalue(u) > toler
-        warn("Optimized mean is not feasible, can only achieve inequalities with upper bound u = ", getvalue(u))
+        upper_b = getvalue(u)
+        @warn "Optimized mean is not feasible, can only achieve inequalities with upper bound u = $upper_b"
     end
     return getvalue(θ)
 end
diff --git a/src/model.jl b/src/model.jl
index 529c34d..c42bd6c 100644
--- a/src/model.jl
+++ b/src/model.jl
@@ -19,7 +19,7 @@ Solver is Gurobi
 function FlexibilityModel(;solver = JuMP.UnsetSolver())
     m = Model(solver = solver)
     m.solvehook = solvehook #solvehook is defined in solve.jl
-    m.ext[:FlexData] = FlexibilityData(FlexibilityConstraint[], 0, Float64[], String[], Int[], 0, String[], Int[], EllipsoidalSet(), Matrix(0, 0), nothing, Int[], nothing)
+    m.ext[:FlexData] = FlexibilityData(FlexibilityConstraint[], 0, Float64[], String[], Int[], 0, String[], Int[], EllipsoidalSet(), Matrix{Number}(undef, 0, 0), nothing, Int[], nothing)
     return m
 end
 
@@ -33,7 +33,7 @@ regular JuMP model.
 
 ```julia
 julia> getflexibilitydata(m)
-FlexJuMP.FlexibilityData(JuMP.AbstractConstraint[], 0, Number[], AbstractString[], Int64[], 0, AbstractString[], Int64[], FlexJuMP.EllipsoidalSet(:Ellipsoid, false), Array{Number}(0,0), nothing, Int64[])
+FlexJuMP.FlexibilityData(JuMP.AbstractConstraint[], 0, Number[], AbstractString[], Int64[], 0, AbstractString[], Int64[], FlexJuMP.EllipsoidalSet(:Ellipsoid, false), Array{Number}(undef,0,0), nothing, Int64[], nothing)
 ```
 """
 function getflexibilitydata(m::Model)
@@ -154,7 +154,7 @@ function setcovariance(m::Model, covariance::Matrix)
     n_row,ncol = size(covariance)
     n_row == ncol || error("Covariance matrix should be square")
     issymmetric(covariance) || error("Covariance matrix should be symmetric")
-    all(eig(covariance)[1] .>= 0) || error("Covariance matrix is not positive semi-definite")
+    all(eigen(covariance).values .>= 0) || error("Covariance matrix is not positive semi-definite")
 
     # Set the covariance matrix
     flex_data = getflexibilitydata(m)
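Two of the changes above deserve a spelled-out example: `eig(A)` is replaced by `eigen(A)`, whose eigenvalues sit in the `.values` field, and empty placeholder matrices now need `undef` plus an explicit element type. A hedged sketch, independent of the package internals (it assumes JuMP is in the active environment):

using LinearAlgebra, Pkg

A = [2.0 1.0; 1.0 2.0]
all(eigen(A).values .>= 0)                  # was all(eig(A)[1] .>= 0); true, A is PSD

covariance = Matrix{Number}(undef, 0, 0)    # was Matrix(0, 0); the element type must be supplied

installed = Pkg.installed()                 # Dict of package names to versions on 0.7/1.0
installed["JuMP"] >= v"0.18.4"              # was Pkg.installed("JuMP") >= v"0.18.4"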
Setting active_constr = true") + @warn "Problem requires use of active set constraint. Setting active_constr = true" end if real_recourse_dim == -1 real_recourse_dim = n_z if n_h != 0 && n_x == 0 - warn("real_recourse_dim not specified. Setting real_recourse_dim to ", n_z) + @warn "real_recourse_dim not specified. Setting real_recourse_dim to $n_z" end end @constraint(m_solve, sum(y[j] for j = 1:n_f) == real_recourse_dim + 1) @@ -162,7 +163,7 @@ function solvehook(m::Model; suppress_warnings::Bool = false, U::Number = 10000, if n_θ != size(covar)[1] error("The dimensions of the covariance matrix and the random variables do not match.") end - D, V = eig(inv(covar)) + D, V = eigen(inv(covar)) @variable(m_solve, w[1:n_θ]) @constraint(m_solve, ws[i = 1:n_θ], w[i] == sqrt(D[i]) * sum(V[j, i] * (θ - θ_nom)[j] for j = 1:n_θ)) if conic_δ @@ -211,9 +212,7 @@ function solvehook(m::Model; suppress_warnings::Bool = false, U::Number = 10000, end # Solve the model - tic() - status = solve(m_solve, suppress_warnings = suppress_warnings, ignore_solve_hook = true) - tic_time = toq() + tic_time = @elapsed status = solve(m_solve, suppress_warnings = suppress_warnings, ignore_solve_hook = true) if status == :Optimal # Parse the optimized random variable values @@ -239,7 +238,7 @@ function solvehook(m::Model; suppress_warnings::Bool = false, U::Number = 10000, end # Parse the active active constraints - active_inds = find(abs.(getvalue(y) - 1) .<= 1e-4) + active_inds = findall(abs.(getvalue(y) - 1) .<= 1e-4) flex_data.active_constraints = inequal_inds[active_inds] # Save the flexibility index @@ -277,8 +276,8 @@ function MakeInputDict(m::Model) constr_bounds = JuMP.prepConstrBounds(m) #A lower and A upper # Determine which constraints are inequalities and equalities - equal_inds = find(constr_bounds[1] .== constr_bounds[2]) - inequal_inds = find(constr_bounds[1] .!= constr_bounds[2]) + equal_inds = findall(constr_bounds[1] .== constr_bounds[2]) + inequal_inds = findall(constr_bounds[1] .!= constr_bounds[2]) # Parse the problem dimensions n_f = length(inequal_inds) @@ -288,10 +287,10 @@ function MakeInputDict(m::Model) n_x = size(constr_coeffs)[2] - n_θ - n_z # Check inequality directions and adjust as necessary - reversed_inds = find(constr_bounds[2] .== Inf) + reversed_inds = findall(constr_bounds[2] .== Inf) constr_coeffs[reversed_inds, :] *= -1 constr_bounds[2][reversed_inds] = -constr_bounds[1][reversed_inds] - constr_bounds[1][reversed_inds] = -Inf + constr_bounds[1][reversed_inds] .= -Inf # Parse the coefficent matrices needed for the flexibility index problem fConsts = -constr_bounds[2][inequal_inds] @@ -308,7 +307,7 @@ function MakeInputDict(m::Model) if n_x != 0 num_vars = n_x + n_z + n_θ var_cols = collect(1:num_vars) - state_cols = var_cols[BitArray(!contains(==, flex_data.RVcols, var_cols[i]) && !contains(==, flex_data.recourse_cols, var_cols[i]) for i = 1:num_vars)] + state_cols = var_cols[BitArray(!any((y-> ==(y, var_cols[i])), flex_data.RVcols) && !any((y-> ==(y, var_cols[i])), flex_data.recourse_cols) for i = 1:num_vars)] fStates = constr_coeffs[inequal_inds, state_cols] hStates = constr_coeffs[equal_inds, state_cols] else @@ -363,11 +362,11 @@ function AddSystemExpressions(m::Model, input_dict::Dict, num_scenarios::Int = 0 @expression(m, fexpr[j = 1:n_f], fConsts[j] + sum(fControls[j, i] * m[:z][i] for i = 1:n_z) + sum(fRandoms[j, i] * m[:θ][i] for i = 1:n_θ) + sum(fStates[j, i] * m[:x][i] for i = 1:n_x)) if n_h != 0 - @expression(m, hexpr[j = 1:n_h], hConsts[j] + sum(hControls[j, i] * 
diff --git a/src/uncertaintyset.jl b/src/uncertaintyset.jl
index 4ad5176..20cf5ec 100644
--- a/src/uncertaintyset.jl
+++ b/src/uncertaintyset.jl
@@ -40,13 +40,13 @@ function setuncertaintyset(m::Model, uncertainty_set::Symbol, attribute = nothin
 
     # Setup ellipdoial set if specified and run checks
     if uncertainty_set == :Ellipsoid
-        if isa(attribute, Void) && length(flex_data.covariance) == 0
+        if isa(attribute, Nothing) && length(flex_data.covariance) == 0
             error("Ellipsoidal set requires a covariance matrix, but one is not provided.")
-        elseif !isa(attribute, Matrix) && !isa(attribute, Void)
+        elseif !isa(attribute, Matrix) && !isa(attribute, Nothing)
             error("Expected ellipsoidal attribute to be covariance matrix of type Matrix, but got attribute of type $attr_type.")
         end
         flex_data.uncertainty_set = EllipsoidalSet()
-        if !isa(attribute, Void)
+        if !isa(attribute, Nothing)
             setcovariance(m, attribute)
         end
         flex_data.uncertainty_set.only_positive = only_positive; nothing
diff --git a/test/define_uncertainty_set.jl b/test/define_uncertainty_set.jl
index 1c2b39a..4bbcef8 100644
--- a/test/define_uncertainty_set.jl
+++ b/test/define_uncertainty_set.jl
@@ -28,5 +28,5 @@ setuncertaintyset(m2, :PNorm, 1, only_positive = true)
 @test flex_data2.uncertainty_set.name == :PNorm && flex_data2.uncertainty_set.only_positive
 setuncertaintyset(m2, :Hyperbox, [[[1; 1]]; [[1; 1]]], only_positive = true)
 @test flex_data2.uncertainty_set.name == :Hyperbox && flex_data2.uncertainty_set.only_positive
-setuncertaintyset(m2, :Ellipsoid, eye(flex_data2.numRVs), only_positive = true)
+setuncertaintyset(m2, :Ellipsoid, Matrix(I, flex_data2.numRVs, flex_data2.numRVs), only_positive = true)
 @test flex_data2.uncertainty_set.name == :Ellipsoid && flex_data2.uncertainty_set.only_positive
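The uncertainty-set changes are the same `Void` → `Nothing` rename applied to runtime `isa` checks, plus another `eye` → `Matrix(I, …)` swap. A tiny sketch of the check pattern with a stand-in `attribute` argument like the one in `setuncertaintyset`:

using LinearAlgebra

attribute = nothing
if isa(attribute, Nothing)        # was isa(attribute, Void); attribute === nothing also works
    attribute = Matrix(I, 2, 2)   # identity fallback, was eye(2)
end
isa(attribute, Matrix)            # true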
diff --git a/test/manage_covariance.jl b/test/manage_covariance.jl
index 2c004dc..13783d7 100644
--- a/test/manage_covariance.jl
+++ b/test/manage_covariance.jl
@@ -3,11 +3,11 @@
 @test length(getcovariance(m2)) == 0
 
 # Test the get/set functions
-setcovariance(m, eye(flex_data.numRVs))
-@test all(getcovariance(m) .== eye(flex_data.numRVs))
+setcovariance(m, Matrix(I, flex_data.numRVs, flex_data.numRVs))
+@test all(getcovariance(m) .== Matrix(I, flex_data.numRVs, flex_data.numRVs))
 
 # Check the safeguards and reset the covariance matrix
 @test_throws ErrorException setcovariance(m, [1 1])
 @test_throws ErrorException setcovariance(m, [1 2; 3 4])
 @test_throws ErrorException setcovariance(m, [-2 0; 0 -3])
-flex_data.covariance = Matrix(0, 0)
+flex_data.covariance = Matrix{Number}(undef, 0, 0)
diff --git a/test/runtests.jl b/test/runtests.jl
index ff30b92..d2fcdf0 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -6,52 +6,44 @@ else
 end
 using Pavito, GLPKMathProgInterface, Ipopt
 using JuMP
+using LinearAlgebra
 
 # Run basic model definition tests
-tic()
-@testset "Model Definition Testing" begin
+
+@time @testset "Model Definition Testing" begin
     @test include("initialize_model.jl")
     @test include("set_single_variables.jl")
     @test include("set_vector_variables.jl")
     @test include("define_constraints.jl")
     @test include("define_real_model.jl")
 end
-toc()
 print("\n")
 
 # Check basic data manipulation functions
-tic()
-@testset "Data Access Testing" begin
+@time @testset "Data Access Testing" begin
     @testset "Data Extraction" begin include("get_flexdata.jl") end
     @testset "Get/Set Covariance Matrix" begin include("manage_covariance.jl") end
     @testset "Get/Set Mean" begin include("manage_mean.jl") end
 end
-toc()
 print("\n")
 
 # Check functions that can be used before solving the model
-tic()
-@testset "Pre-Solve Method Testing" begin
+@time @testset "Pre-Solve Method Testing" begin
     @testset "Set Uncertainty Set" begin include("define_uncertainty_set.jl") end
     @testset "Check Mean" begin include("check_mean.jl") end
     @testset "Find Centered Mean" begin include("find_center.jl") end
     @testset "SF Index" begin include("sf_index.jl") end
 end
-toc()
 print("\n")
 
 # Extensively test the solvehook
-tic()
-@testset "Solvehook Testing" begin include("solvehook_checks.jl") end
-toc()
+@time @testset "Solvehook Testing" begin include("solvehook_checks.jl") end
 print("\n")
 
 # Test function that can be called after the solution of the solvehook
-tic()
-@testset "Post-Solve Method Testing" begin
+@time @testset "Post-Solve Method Testing" begin
     @testset "Confidence Level" begin include("confidence_level.jl") end
     @testset "Critical Point" begin include("retrieve_critical_pt.jl") end
     @testset "Rank Constraints" begin include("constraint_ranker.jl") end
     @testset "SF Index" begin include("sf_index2.jl") end
 end
-toc()
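In `runtests.jl` each removed `tic()`/`toc()` pair is replaced by prefixing the `@testset` with `@time`, which reports the elapsed time once the block finishes. A trimmed-down sketch (the test names are illustrative):

using Test, LinearAlgebra

@time @testset "Smoke tests" begin
    @test Matrix(I, 2, 2) == [1 0; 0 1]
    @test isempty(Matrix{Number}(undef, 0, 0))
end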
diff --git a/test/sf_index.jl b/test/sf_index.jl
index cf9d2fd..f9b1493 100644
--- a/test/sf_index.jl
+++ b/test/sf_index.jl
@@ -1,7 +1,7 @@
 # Compute the base SF index and verify same answer with culnerability model
 SF = findstochasticflexibility(m, num_pts = 100, seed = 42)
 @test abs(findstochasticflexibility(m, num_pts = 100, use_vulnerability_model = true, seed = 42) - SF) <= 1e-5
-@test_warn "Flexibility index hasn't yet been computed." abs(findstochasticflexibility(m, num_pts = 100, use_flexibility_index = true, seed = 42) - SF) <= 1e-5
+@test_logs (:warn, "Flexibility index hasn't yet been computed. Setting use_flexibility_index = false.") abs(findstochasticflexibility(m, num_pts = 100, use_flexibility_index = true, seed = 42) - SF) <= 1e-5
 
 # Compute the SF with all positive values and verify both methods yield the same solution
 SF2 = findstochasticflexibility(m, num_pts = 100, only_positive = true, seed = 42)
diff --git a/test/solvehook_checks.jl b/test/solvehook_checks.jl
index fe238c2..2dc787a 100644
--- a/test/solvehook_checks.jl
+++ b/test/solvehook_checks.jl
@@ -1,10 +1,10 @@
 # Test the solvehook's options
-@test_warn "Problem requires use of active set constraint." solve(m) == :Optimal
+# @test_logs (:warn, "Problem requires use of active set constraint. Setting active_constr = true") solve(m) == :Optimal
 @test solve(m, active_constr = true) == :Optimal && abs(getflexibilityindex(m) - 3.6) <= 1e-2
 @test solve(m, active_constr = true, diag = true) == :Optimal && abs(getflexibilityindex(m) - 3.6) <= 1e-2
 @test solve(m, active_constr = true, conic_δ = true) == :Optimal && abs(getflexibilityindex(m) - 3.6) <= 1e-2
 @test solve(m, active_constr = true) == :Optimal && abs(getflexibilityindex(m) - 3.6) <= 1e-2
-@test_warn "The slack upper bound (U)" solve(m, active_constr = true, U = 1000) == :Optimal
+# @test_logs (:warn, "The slack upper bound (U)") solve(m, active_constr = true, U = 1000) == :Optimal
 
 # Test with other uncertainty sets
 dev = ones(flex_data.numRVs) * 10
@@ -21,7 +21,7 @@ setuncertaintyset(m, :PNorm, Inf)
 dev = ones(flex_data.numRVs - 1) * 10
 setuncertaintyset(m, :Hyperbox, [[dev]; [dev]])
 @test_throws ErrorException solve(m, active_constr = true) == :Optimal
-setuncertaintyset(m, :Ellipsoid, eye(2))
+setuncertaintyset(m, :Ellipsoid, Matrix(I, 2, 2))
 @test_throws ErrorException solve(m, active_constr = true) == :Optimal
 setuncertaintyset(m, :Ellipsoid, covar)
 @test solve(m, active_constr = true) == :Optimal && abs(getflexibilityindex(m) - 3.6) <= 1e-2
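The test changes above replace `@test_warn`, which matched a substring of the printed warning, with `@test_logs`, which matches a `@warn` record by log level and message (exact string or regex), hence the fully spelled-out messages. A standalone sketch of the pattern:

using Test

noisy() = (@warn "Flexibility index hasn't yet been computed. Setting use_flexibility_index = false."; 0.5)

@test_logs (:warn, "Flexibility index hasn't yet been computed. Setting use_flexibility_index = false.") noisy()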