Skip to content

Commit 73fd3bf

Browse files
Merge pull request #279 from marcoq/marcoq-SectionSampler
Section sampler
2 parents 167fcb3 + 096f369 commit 73fd3bf

7 files changed

+187
-5
lines changed

docs/src/samples.md

+6
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,12 @@ LowDiscrepancySample{T}
3434
sample(n,lb,ub,S::LowDiscrepancySample)
3535
```
3636

37+
* Sample on section
38+
```@docs
39+
SectionSample
40+
sample(n,lb,ub,S::SectionSample)
41+
```
42+
3743
## Adding a new sampling method
3844

3945
Adding a new sampling method is a two-step process:

src/NeuralSurrogate.jl

+1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
using Flux
22
using Flux: @epochs
3+
34
mutable struct NeuralSurrogate{X,Y,M,L,O,P,N,A,U} <: AbstractSurrogate
45
x::X
56
y::Y

src/Optimization.jl

+83-4
Original file line numberDiff line numberDiff line change
@@ -697,11 +697,11 @@ function surrogate_optimize(obj::Function,::DYCORS,lb::Number,ub::Number,surr1::
697697
while new_points[i] < lb || new_points[i] > ub
698698
if new_points[i] > ub
699699
#reflection
700-
new_points[i] = maximum(surr1.x) - norm(new_points[i] - maximum(surr1.x))
700+
new_points[i] = max(lb, maximum(surr1.x) - norm(new_points[i] - maximum(surr1.x)))
701701
end
702702
if new_points[i] < lb
703703
#reflection
704-
new_points[i] = minimum(surr1.x) + norm(new_points[i]-minimum(surr1.x))
704+
new_points[i] = min(ub, minimum(surr1.x) + norm(new_points[i]-minimum(surr1.x)))
705705
end
706706
end
707707
end
@@ -831,10 +831,10 @@ function surrogate_optimize(obj::Function,::DYCORS,lb,ub,surrn::AbstractSurrogat
831831
for j = 1:d
832832
while new_points[i,j] < lb[j] || new_points[i,j] > ub[j]
833833
if new_points[i,j] > ub[j]
834-
new_points[i,j] = maximum(surrn.x)[j] - norm(new_points[i,j] - maximum(surrn.x)[j])
834+
new_points[i,j] = max(lb[j], maximum(surrn.x)[j] - norm(new_points[i,j] - maximum(surrn.x)[j]))
835835
end
836836
if new_points[i,j] < lb[j]
837-
new_points[i,j] = minimum(surrn.x)[j] + norm(new_points[i]-minimum(surrn.x)[j])
837+
new_points[i,j] = min(ub[j], minimum(surrn.x)[j] + norm(new_points[i]-minimum(surrn.x)[j]))
838838
end
839839
end
840840
end
@@ -1711,3 +1711,82 @@ end
17111711
end
17121712
return pareto_set,pareto_front
17131713
end
1714+
1715+
1716+
function surrogate_optimize(obj::Function,::EI,lb,ub,krig,sample_type::SectionSample;maxiters=100,num_new_samples=100)
    # Expected-improvement (EI) optimization restricted to the section defined by
    # `sample_type`: candidates are drawn on the section, scored by EI against the
    # kriging surrogate, and the best sufficiently-novel candidate is evaluated
    # with `obj` and added to the surrogate.
    dtol = 1e-3*norm(ub-lb)     # minimum distance a new point must keep from known points
    exploration = 0.01          # EI exploration/exploitation trade-off parameter
    for iter = 1:maxiters
        n_known = length(krig.x)
        candidates = sample(num_new_samples,lb,ub,sample_type)
        f_max = maximum(krig.y)
        scores = zeros(eltype(krig.x[1]),num_new_samples)
        accepted = false
        best_x = zero(eltype(krig.x[1]))
        best_ei = zero(eltype(krig.x[1]))
        dists = zeros(eltype(krig.x[1]),n_known)
        while !accepted
            # Score every remaining candidate by its expected improvement.
            for (j, cand) in enumerate(candidates)
                sigma = std_error_at_point(krig,cand)
                mu = krig(cand)
                z = abs(sigma) > 1e-6 ? (mu - f_max - exploration)/sigma : 0
                scores[j] = (mu-f_max-exploration)*cdf(Normal(),z) + sigma*pdf(Normal(),z)
            end
            best_idx = argmax(scores)
            cand_x = candidates[best_idx]
            cand_ei = maximum(scores)
            for l = 1:n_known
                dists[l] = norm(krig.x[l] .- cand_x)
            end
            # The winner must keep some distance from every point the surrogate knows.
            if any(dists .<= dtol)
                # Too close to an existing sample: discard and re-rank the rest.
                deleteat!(scores,best_idx)
                deleteat!(candidates,best_idx)
                if length(candidates) == 0
                    println("Out of sampling points")
                    return section_sampler_returner(sample_type, krig.x, krig.y, lb, ub, krig)
                end
            else
                accepted = true
                best_x = cand_x
                best_ei = cand_ei
            end
        end
        # Stop when the best expected improvement is negligible relative to the
        # observed spread of objective values.
        if best_ei < 1e-6*norm(maximum(krig.y)-minimum(krig.y))
            return section_sampler_returner(sample_type, krig.x, krig.y, lb, ub, krig)
        end
        add_point!(krig,Tuple(best_x),obj(best_x))
    end
    println("Completed maximum number of iterations")
end
1768+
1769+
"""
    section_sampler_returner(sample_type::SectionSample, surrn_x, surrn_y, lb, ub, surrn)

Fallback result for the section-constrained EI search: among the points the
surrogate already knows that lie exactly on the requested section, return the
`(point, value)` pair with the lowest objective value. If no known point lies
on the section, probe the surrogate `surrn` at freshly sampled section points
instead.

# Throws
- `ArgumentError` when `surrn_x` and `surrn_y` have mismatched lengths.
"""
function section_sampler_returner(
    sample_type::SectionSample, surrn_x, surrn_y,
    lb, ub, surrn)
    d_fixed = Surrogates.fixed_dimensions(sample_type)
    # Input validation: one objective value per sample point. (Was an `@assert`,
    # which can be compiled out at higher optimization levels; throw explicitly.)
    length(surrn_y) == size(surrn_x)[1] ||
        throw(ArgumentError("surrn_x and surrn_y must contain the same number of points"))
    surrn_xy = [(surrn_x[y], surrn_y[y]) for y in 1:length(surrn_y)]
    # Keep only the known points whose fixed coordinates match the section exactly.
    section_surr1_xy = filter(
        xyz->xyz[1][d_fixed]==Tuple(sample_type.x0[d_fixed]),
        surrn_xy)
    section_surr1_x = [xy[1] for xy in section_surr1_xy]
    section_surr1_y = [xy[2] for xy in section_surr1_xy]
    if length(section_surr1_xy) == 0
        @debug "No new point added - surrogate locally stable"
        N_NEW_POINTS = 100
        # No known point lies on the section: sample fresh section points and
        # evaluate the surrogate there.
        section_surr1_x = sample(N_NEW_POINTS, lb, ub, sample_type)
        section_surr1_y = zeros(N_NEW_POINTS)
        for i in 1:length(section_surr1_x)
            # `sample` returns a vector of tuples; evaluate the surrogate at each.
            # (Equivalent to the original `Tuple([section_surr1_x[i, :]...])[1]`.)
            section_surr1_y[i] = surrn(section_surr1_x[i])
        end
    end
    index = argmin(section_surr1_y)
    return (section_surr1_x[index], section_surr1_y[index])
end

src/Sampling.jl

+42
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,10 @@ end
3535

3636
struct GoldenSample <: SamplingAlgorithm end
3737

38+
"""
    SectionSample(x0, sa)

Sampling on a section of the parameter space: `x0` is a vector in which finite
entries are fixed coordinates and `NaN` entries mark free dimensions, and `sa`
is the `SamplingAlgorithm` used to sample the free dimensions.
"""
struct SectionSample{T, S <: SamplingAlgorithm} <: SamplingAlgorithm
    x0::Vector{T}
    # Concretely parameterized field (was the abstract `sa::SamplingAlgorithm`)
    # so instances are fully typed and field access is type-stable.
    sa::S
end
3842

3943
"""
4044
sample(n,lb,ub,S::GridSample)
@@ -236,3 +240,41 @@ function sample(n,lb,ub,G::GoldenSample)
236240
return Tuple.(y)
237241
end
238242
end
243+
244+
# Indices of the coordinates held fixed by the section (non-NaN entries of x0).
# `findall(!isnan, v)` replaces the original `findall(x->x==false, isnan.(v))`,
# which allocated an intermediate BitVector for the same result.
fixed_dimensions(section_sampler::SectionSample)::Vector{Int64} =
    findall(!isnan, section_sampler.x0)
247+
248+
# Indices of the free coordinates (NaN entries of x0), i.e. the dimensions the
# wrapped sampler draws values for. Idiomatic `findall(isnan, v)` replaces the
# original `findall(x->x==true, isnan.(v))`.
free_dimensions(section_sampler::SectionSample)::Vector{Int64} =
    findall(isnan, section_sampler.x0)
251+
252+
"""
    sample(n,lb,ub,K::SectionSample)

Return `n` sample points (as `Tuple`s) constrained to a section.

In surrogate-based identification and control, optimization can alternate between
unconstrained sampling in the full-dimensional parameter space, and sampling
constrained on specific sections (e.g. a plane in a 3D volume).

A `SectionSample` allows sampling and optimizing on a subset of 'free' dimensions
while keeping 'fixed' ones constrained. The sampler is defined as in e.g.

    section_sampler_y_is_10 = SectionSample([NaN64, NaN64, 10.0, 10.0], Surrogates.UniformSample())

where the first argument is a `Vector{T}` in which numbers are fixed coordinates
and `NaN`s correspond to free dimensions, and the second argument is a
`SamplingAlgorithm` which is used to sample in the free dimensions.
"""
function sample(n,lb,ub,section_sampler::SectionSample)
    if lb isa Number
        # 1-D: nothing can be fixed, fall back to plain uniform sampling.
        # NOTE(review): this ignores `section_sampler.sa` — confirm intended.
        return rand(Uniform(lb,ub),n)
    else
        d_free = Surrogates.free_dimensions(section_sampler)
        # Sample only the free dimensions with the wrapped sampler.
        new_samples = sample(n, lb[d_free], ub[d_free], section_sampler.sa)
        # Start from n copies of x0 (one row per point), then fill the free slots.
        out_as_vec = repeat(section_sampler.x0', n, 1)
        for y in 1:size(out_as_vec,1)
            for (k, dim) in enumerate(d_free)
                # BUGFIX: write into the free dimension's own column (`dim`),
                # not column `k`; the two differ whenever a fixed dimension
                # precedes a free one (e.g. x0 = [10.0, NaN]), in which case the
                # original overwrote the fixed coordinate and left a NaN behind.
                out_as_vec[y,dim] = new_samples[y][k]
            end
        end
        return [Tuple(out_as_vec[y,:]) for y in 1:size(out_as_vec,1)]
    end
end

src/Surrogates.jl

+1-1
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ export AbstractSurrogate, SamplingAlgorithm
9595
export Kriging, RadialBasis, add_point!, current_estimate, std_error_at_point
9696
export linearRadial,cubicRadial,multiquadricRadial,thinplateRadial
9797
export sample, GridSample, UniformSample, SobolSample, LatinHypercubeSample, LowDiscrepancySample
98-
export RandomSample, KroneckerSample, GoldenSample
98+
export RandomSample, KroneckerSample, GoldenSample, SectionSample
9999
export SRBF,LCBS,EI,DYCORS,SOP,EGO,RTEA,SMB,surrogate_optimize
100100
export LobachevskySurrogate, lobachevsky_integral, lobachevsky_integrate_dimension
101101
export LinearSurrogate

test/SectionSampleTests.jl

+53
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
"""
Latin-hypercube-sample f(x) = x[1]+x[2]+x[3] on [0,10]^3,
then minimize it on the section z = 10 (SectionSample([NaN, NaN, 10.0], ...)),
and verify that the section minimum is found near (x,y,z) = (0,0,10)
rather than at the global minimum (0,0,0).
"""

using Surrogates
using Test

lb = [ 0.0, 0.0, 0.0]
ub = [10.0,10.0,10.0]
x = Surrogates.sample(10,lb,ub,LatinHypercubeSample())
f = x -> x[1]+x[2]+x[3]
y = f.(x)
# Was a bare comparison whose Bool result was discarded; assert it.
@test f([0,0,0]) == 0

f_hat = Kriging(x,y,lb,ub)

f_hat([0,0,0])

# NOTE(review): bare `isapprox` — the result is discarded, so this checks
# nothing. Left unasserted because the surrogate need not be exact at [0,0,0];
# consider `@test isapprox(...; atol=...)` with an explicit tolerance.
isapprox(f([0,0,0]), f_hat([0,0,0]))

# The global minimum of f is at (0,0,0).
(xy_min, f_hat_min) = surrogate_optimize(
    f,
    DYCORS(), lb, ub,
    f_hat,
    SobolSample())

# NOTE(review): result of a stochastic optimizer; unasserted as in the original.
isapprox(xy_min[1], 0.0, atol=1e-3)

# The minimum on the z = 10 section should be near (0, 0, 10).
section_sampler_z_is_10 = SectionSample(
    [NaN64, NaN64, 10.0],
    Surrogates.UniformSample())

@test [3] == Surrogates.fixed_dimensions(section_sampler_z_is_10)
@test [1,2] == Surrogates.free_dimensions(section_sampler_z_is_10)

Surrogates.sample(5, lb, ub, section_sampler_z_is_10)

(xy_min, f_hat_min) = surrogate_optimize(
    f,
    EI(), lb, ub,
    f_hat,
    section_sampler_z_is_10, maxiters=1000)

# NOTE(review): stochastic results; unasserted as in the original — consider
# asserting with loose tolerances once the optimizer takes a seeded RNG.
isapprox(xy_min[1], 0.0, atol=0.1)
isapprox(xy_min[2], 0.0, atol=0.1)
isapprox(xy_min[3], 10.0, atol=0.1)

test/runtests.jl

+1
Original file line numberDiff line numberDiff line change
@@ -19,3 +19,4 @@ using Surrogates
1919
@testset "VariableFidelity" begin include("VariableFidelity.jl") end
2020
@testset "Earth" begin include("earth.jl") end
2121
@testset "Gradient Enhanced Kriging" begin include("GEK.jl") end
22+
@testset "Section Samplers" begin include("SectionSampleTests.jl") end

0 commit comments

Comments
 (0)