Merge pull request #383 from ctidigital/feature/autoscale-update
Feature/autoscale update
hunner authored Jan 19, 2017
2 parents 7ae7ef1 + 60d91fd commit db1c767
Showing 5 changed files with 81 additions and 1 deletion.
6 changes: 6 additions & 0 deletions README.md
@@ -601,9 +601,15 @@ back-end instances. Accepts a hash with the following keys:
##### `load_balancers`
*Optional* A list of load balancer names that should be attached to this autoscaling group.

##### `target_groups`
*Optional* A list of ELBv2 Target Group names that should be attached to this autoscaling group.

##### `subnets`
*Optional* The subnets to associate with the autoscaling group.

##### `termination_policies`
*Optional* A list of termination policies to use when scaling in instances. For valid termination policies, see [Controlling Which Instances Auto Scaling Terminates During Scale In](http://docs.aws.amazon.com/autoscaling/latest/userguide/as-instance-termination.html).

##### `tags`
*Optional* The tags to assign to the autoscaling group. Accepts a 'key => value' hash of tags. The tags are not propagated to launched instances.

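For illustration only (not part of this diff), here is a test-style instantiation exercising the two new attributes, built the way this module's specs construct resources. All names are invented, the attribute list is abbreviated, and the module is assumed to be on Puppet's load path:

```ruby
require 'puppet'

# Hypothetical resource; 'web-asg', 'web-lc', 'web-subnet' and 'web-tg' are
# invented names, not values from this commit.
asg = Puppet::Type.type(:ec2_autoscalinggroup).new(
  name:                 'web-asg',
  min_size:             2,
  max_size:             4,
  launch_configuration: 'web-lc',
  region:               'us-west-2',
  subnets:              ['web-subnet'],
  target_groups:        ['web-tg'],
  termination_policies: ['OldestInstance', 'Default'],
)
puts asg[:target_groups].inspect   # => ["web-tg"]
```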
44 changes: 44 additions & 0 deletions lib/puppet/provider/ec2_autoscalinggroup/v2.rb
@@ -59,6 +59,8 @@ def self.group_to_hash(region, group)
health_check_grace_period: group.health_check_grace_period,
new_instances_protected_from_scale_in: group.new_instances_protected_from_scale_in,
load_balancers: fetch_load_balancers(autoscaling_client(region), group.auto_scaling_group_name),
target_groups: fetch_target_groups(region, group.auto_scaling_group_name),
termination_policies: group.termination_policies,
instance_count: group.instances.count,
ensure: :present,
subnets: subnet_names,
@@ -72,6 +74,17 @@ def self.fetch_load_balancers(client, name)
response.load_balancers.collect { |lb| lb.load_balancer_name }
end

# The autoscaling API reports attached target groups by ARN; resolve those
# ARNs into the names declared on the Puppet resource.
def self.fetch_target_groups(region, name)
  response = autoscaling_client(region).describe_load_balancer_target_groups(auto_scaling_group_name: name)
  response.load_balancer_target_groups.collect { |tg| fetch_target_group_name(region, tg.load_balancer_target_group_arn) }.flatten
end

def self.fetch_target_group_name(region, value)
  # Accept a single ARN or a list; describe_target_groups wants an array.
  arn = value.is_a?(Array) ? value : [value]
  response = elbv2_client(region).describe_target_groups(target_group_arns: arn)
  response.target_groups.collect { |tg| tg.target_group_name }
end

def exists?
Puppet.debug("Checking if auto scaling group #{name} exists in region #{target_region}")
@property_hash[:ensure] == :present
@@ -177,6 +190,14 @@ def subnets=(value)
)
end

def termination_policies=(value)
  # Wrap a scalar policy so the API always receives an array.
  policies = value.is_a?(Array) ? value : [value]
  autoscaling_client(target_region).update_auto_scaling_group(
    auto_scaling_group_name: name,
    termination_policies: policies,
  )
end

def availability_zones=(value)
zones = value.is_a?(Array) ? value : [value]
autoscaling_client(target_region).update_auto_scaling_group(
@@ -185,6 +206,29 @@ def availability_zones=(value)
)
end

def target_groups=(value)
  should_names = value.is_a?(Array) ? value : [value]

  # Resolve names to ARNs on both sides before diffing. An empty name list
  # must short-circuit: describe_target_groups with no names would list
  # every target group in the account, not none.
  should = Set.new
  unless should_names.empty?
    response = elbv2_client(target_region).describe_target_groups(names: should_names)
    should = response.target_groups.collect { |tg| tg.target_group_arn }.to_set
  end

  is = Set.new
  if target_groups.is_a?(Array) && !target_groups.empty?
    response = elbv2_client(target_region).describe_target_groups(names: target_groups)
    is = response.target_groups.collect { |tg| tg.target_group_arn }.to_set
  end

  to_delete = is - should
  to_add = should - is

  # The API expects plain arrays; skip the calls entirely when nothing changes.
  unless to_add.empty?
    autoscaling_client(target_region).attach_load_balancer_target_groups(
      auto_scaling_group_name: name,
      target_group_arns: to_add.to_a,
    )
  end
  unless to_delete.empty?
    autoscaling_client(target_region).detach_load_balancer_target_groups(
      auto_scaling_group_name: name,
      target_group_arns: to_delete.to_a,
    )
  end
end

def load_balancers=(value)
should = (value.is_a?(Array) ? value : [value]).to_set
is = fetch_load_balancers(autoscaling_client(target_region), name).to_set
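As an aside (not part of the commit), the reconciliation in `target_groups=` reduces to set arithmetic over ARNs; a minimal runnable sketch with invented, abbreviated ARNs:

```ruby
require 'set'

# Invented, abbreviated ARNs for readability.
should = ['arn:aws:elb:targetgroup/web-tg', 'arn:aws:elb:targetgroup/api-tg'].to_set
is     = ['arn:aws:elb:targetgroup/api-tg', 'arn:aws:elb:targetgroup/old-tg'].to_set

puts (should - is).to_a.inspect   # to attach: ["arn:aws:elb:targetgroup/web-tg"]
puts (is - should).to_a.inspect   # to detach: ["arn:aws:elb:targetgroup/old-tg"]
```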
6 changes: 5 additions & 1 deletion lib/puppet/provider/ec2_launchconfiguration/v2.rb
@@ -62,7 +62,11 @@ def self.config_to_hash(region, config)
spot_price: config.spot_price,
ebs_optimized: config.ebs_optimized,
}
-      config[:block_device_mappings] = devices unless devices.empty?
+      if devices.empty?
+        config[:block_device_mappings] = [ ]
+      else
+        config[:block_device_mappings] = devices
+      end
config
end

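The behavioral difference, sketched with plain Ruby (not part of the commit): previously the key was omitted whenever no block devices were configured; now an empty list is always reported, presumably so the property can be compared against a manifest that declares none:

```ruby
devices = []

old_config = {}
old_config[:block_device_mappings] = devices unless devices.empty?
puts old_config.key?(:block_device_mappings)    # => false, key absent

new_config = {}
new_config[:block_device_mappings] = devices.empty? ? [] : devices
puts new_config[:block_device_mappings].inspect # => [], key always present
```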
18 changes: 18 additions & 0 deletions lib/puppet/type/ec2_autoscalinggroup.rb
@@ -135,6 +135,24 @@ def insync?(is)
end
end

newproperty(:target_groups, :array_matching => :all) do
  desc 'The ELBv2 target groups attached to this group.'
  validate do |value|
    fail 'target_groups should be a String' unless value.is_a?(String)
    fail 'target_groups cannot be blank' if value.empty?
  end
  # Compare as sets so ordering returned by the API does not force updates.
  def insync?(is)
    is.to_set == should.to_set
  end
end

newproperty(:termination_policies, :array_matching => :all) do
desc 'The termination policies attached to this group.'
def insync?(is)
is.to_set == should.to_set
end
end

newproperty(:subnets, :array_matching => :all) do
desc 'The subnets to associate the autoscaling group.'
validate do |value|
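Both new properties override `insync?` to compare as sets, so ordering differences in API responses do not trigger spurious updates. A quick illustration with hypothetical values:

```ruby
require 'set'

is     = ['OldestInstance', 'ClosestToNextInstanceHour']
should = ['ClosestToNextInstanceHour', 'OldestInstance']

puts is == should                 # => false: array equality is order-sensitive
puts is.to_set == should.to_set   # => true: what the insync? overrides test
```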
8 changes: 8 additions & 0 deletions lib/puppet_x/puppetlabs/aws.rb
@@ -162,6 +162,14 @@ def elb_client(region = default_region)
self.class.elb_client(region)
end

def self.elbv2_client(region = default_region)
::Aws::ElasticLoadBalancingV2::Client.new(client_config(region))
end

def elbv2_client(region = default_region)
self.class.elbv2_client(region)
end

def self.autoscaling_client(region = default_region)
::Aws::AutoScaling::Client.new(client_config(region))
end
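The new `elbv2_client` helper follows the same class-method-plus-instance-delegate pattern as the existing `elb_client`. A standalone sketch of what it constructs, minus the module's shared `client_config` (credentials and proxy) handling; the region and target group name are invented:

```ruby
require 'aws-sdk'

client = Aws::ElasticLoadBalancingV2::Client.new(region: 'eu-west-1')
client.describe_target_groups(names: ['web-tg']).target_groups.each do |tg|
  puts "#{tg.target_group_name} => #{tg.target_group_arn}"
end
```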
