-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathstand-up.py
executable file
·836 lines (732 loc) · 40.7 KB
/
stand-up.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
#!/usr/bin/env python3
import os.path, socket
from os.path import expanduser
import secrets
import sys, os, stat, signal, time
import json, base64, hashlib
import argparse
import coloredlogs, logging
import ipaddress
logger = logging.getLogger(__name__)
argparser = argparse.ArgumentParser()
argparser.add_argument("--target", "-t", default='digitalocean', help="which provider to use (default: digitalocean)", choices=['digitalocean', 'gcloud', 'sporestack', 'manual'])
argparser.add_argument("--digitalocean-api-key", help="API key for digitalocean")
argparser.add_argument("--gcloud-api-key-file", help="API key file for GCloud")
argparser.add_argument("--gcloud-project-id", help="Project ID for GCloud (default: first available project id)")
argparser.add_argument("--sporestack-days", help="How many days to prepay sporestack instance", default=1, type=int)
argparser.add_argument("--sporestack-currency", help="Which currency to use for payment", default='btc', choices=['btc', 'bch', 'bsv', 'xmr'])
argparser.add_argument("--instance-ip", help="Instance IP if manual mode is used")
argparser.add_argument("--name", "-n", help="slug name (default: %(default)s)", default='investig')
argparser.add_argument("--region", "-r", help="region or zone (default: selects random region/zone)", default='random')
argparser.add_argument("--size", "-s", help="slug size or machine type (default: %(default)s)", default='2gb')
argparser.add_argument("--image", help="slug image (default: %(default)s)", default='ubuntu-20-04-x64')
argparser.add_argument("--user", "-u", help="username to use for ssh connection (default: %(default)s)", default='root')
argparser.add_argument("--ssh-port", help="port to use for ssh connection (default: %(default)s)", default=22, type=int)
argparser.add_argument("--ssh-connection-tries", help="how many times to try to establish ssh connection (default: %(default)s)", default=30, type=int)
argparser.add_argument("--ssh-wait-for-auth", help="retry in case of failed authentication upon establishing ssh session", action='store_true')
argparser.add_argument("--tool", help="additonal tools to install", action='append')
argparser.add_argument("--repo", help="additonal repos to install", action='append')
argparser.add_argument("--service", help="service to install", action='append', choices=['ipsec', 'proxy', 'shadowsocks', 'wireguard', 'ssh-pivot'])
argparser.add_argument("--wallet", help="wallet to install", action='append', choices=['monero'])
argparser.add_argument("--force", help="overwrite existing incstances", action='store_true')
argparser.add_argument("--destroy", help="destroy existing incstances", action='store_true')
argparser.add_argument("--bare", "-b", help="create bare instance", action='store_true')
argparser.add_argument("--no-kali", help="do not bootstrap kali", action='store_true')
argparser.add_argument("--compose-version", help="compose version (default: %(default)s)", default='1.25.0')
argparser.add_argument("--verbose", "-v", action='count', default=0)
argparser.add_argument("--quiet", "-q", help="only display errors and IP", action='store_true')
argparser.add_argument("--ssh-private-key", "-i", help="SSH key to access instance (default: %(default)s)", default=expanduser("~") + '/.ssh/id_rsa')
argparser.add_argument("--create-private-key", help="create ssh key to access instance", action='store_true')
args = argparser.parse_args()
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
level = levels[min(len(levels)-1,args.verbose)]
coloredlogs.install(level=level)
cloudkeys = {}
ipsec_vpn_compose = "ipsec.yml"
wireguard_config_script = "wireguard.sh"
wireguard_port = 1194
configfile = expanduser("~") + '/.config/investiGator.json'
def signal_handler(sig, frame):
    """SIGINT handler: tear down any partially-created cloud resources and exit."""
    cleanup_and_die('Interrupt signal triggered')
signal.signal(signal.SIGINT, signal_handler)
# signal.pause()
def cleanup_and_die(msg):
    """Destroy any partially-created cloud resources, log msg, then exit(2).

    Detection idiom: each provider branch evaluates a bare global name
    (droplet / operation_create / instance_dict / addKey) inside
    try/except NameError — if the name was never bound, no resource was
    created and there is nothing to clean up.

    Parameters:
        msg: message logged at CRITICAL level just before exiting.
    """
    if args.target == 'digitalocean':
        try:
            droplet  # NameError if the droplet was never created
            if droplet.id is not None:
                logger.critical("calling destroy() on instance id {}".format(droplet.id))
                droplet.destroy()
        except NameError:
            logger.debug('no instance has been created yet')
    elif args.target == 'gcloud':
        try:
            operation_create  # NameError if instance creation never started
            logger.critical("calling delete() on instance name {}".format(config['name']))
            operation = gce_manager.instances().delete(project=config['gcloud_project_id'], zone=config['region'], instance=config['name']).execute()
            gcloud_wait(gce_manager, config['region'], operation['name'])
        except NameError:
            logger.debug('no instance has been created yet')
    elif args.target == 'sporestack':
        try:
            instance_dict  # NameError if launch() never completed
            machine_info = sporestack.client.get_machine_info(config['name'])
            logger.critical("calling delete() on instance id {}".format(machine_info['machine_id']))
            sporestack.client.delete(config['name'], sporestack.client.API_ENDPOINT)
        except NameError:
            logger.debug('no instance has been created yet')
    # Regardless of provider, remove the temporarily-uploaded SSH key if any.
    try:
        addKey  # NameError if no key was uploaded
        logger.critical("calling destroy() on added ssh key object fingerprint {} and name {}".format(addKey.fingerprint, addKey.name))
        addKey.destroy()
    except NameError:
        pass
    logger.critical(msg)
    sys.exit(2)
try:
import paramiko
except ImportError:
cleanup_and_die("please install the paramiko module: 'pip install -U paramiko'")
try:
import digitalocean
except ImportError:
cleanup_and_die("please install the digitalocean module: 'pip install -U python-digitalocean'")
try:
import sporestack
except ImportError:
cleanup_and_die("please install the sporestack module: 'pip install -U sporestack'")
try:
from google.oauth2 import service_account
import googleapiclient.discovery
except ImportError:
cleanup_and_die("please install the gcloud module: 'pip install -U google-api-python-client'")
def write_config(configdict, configfile):
    """Persist configdict as pretty-printed JSON at path configfile.

    Per-run / secret-bearing keys are stripped before saving so they are
    never written to disk. NOTE: configdict is mutated in place — callers
    re-merge CLI arguments afterwards, so the deletions are intentional.

    Parameters:
        configdict: configuration dictionary to save (mutated!).
        configfile: destination path; its parent directory is created
            (one level only) when missing.
    """
    # Original list contained "gcloud_api_key_file" twice; duplicate removed.
    for nosave in ("bare", "no_kali", "create_private_key", "destroy", "force",
                   "instance_ip", "name", "quiet", "verbose",
                   "digitalocean_api_key", "gcloud_api_key_file",
                   "ssh_wait_for_auth"):
        configdict.pop(nosave, None)
    if not os.path.exists(os.path.dirname(configfile)):
        os.mkdir(os.path.dirname(configfile))
    with open(configfile, 'w') as configfilefd:
        json.dump(configdict, configfilefd, sort_keys=True, indent=2)
def read_config(config_file):
    """Load and return the JSON configuration stored at config_file.

    Bug fix: the original ignored its parameter and read the global
    'configfile' instead; the function now honours the path it is given
    (the existing call site passes that same global, so behavior at the
    call site is unchanged).
    """
    with open(config_file, 'r') as configfilefd:
        return json.load(configfilefd)
def keyToFingerprint(publicKey):
    """Return the colon-separated MD5 fingerprint of a base64-encoded public key.

    Parameters:
        publicKey: base64 blob of an SSH public key (surrounding whitespace
            is tolerated).
    Returns:
        Fingerprint string such as 'aa:bb:cc:...'.
    """
    raw = base64.b64decode(publicKey.strip().encode('ascii'))
    digest = hashlib.md5(raw).hexdigest()
    # Split the 32-char hex digest into byte-sized pairs joined by colons.
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
# ---- merge stored config with CLI args and prepare SSH key material ----
# --no-kali implies --bare: without kali repos the default tools can't install.
if vars(args).get('no_kali') and not vars(args).get('bare'):
    logger.critical("if instance is requested as no-kali, default tools can not be installed, enabling bare mode")
    args.bare = True
if os.path.exists(configfile):
    config = read_config(configfile)
    if not 'cloudkeys' in config:
        config['cloudkeys'] = cloudkeys
    else:
        cloudkeys = config['cloudkeys']
else:
    # First run: seed the config file from the CLI defaults.
    logger.info('no config file found, assuming defaults')
    config = vars(args)
    config['cloudkeys'] = cloudkeys
    write_config(config, configfile)
# CLI arguments always override values loaded from the config file.
config = {**config, **vars(args)}
if vars(args).get('quiet') and config['verbose'] > 0:
    cleanup_and_die("options quiet and verbose are mutually exclusive")
logger.debug(config)
if os.path.exists(config['ssh_private_key']):
    # Refuse keys readable by anyone but the owner (ssh would too).
    statr = os.stat(config['ssh_private_key']).st_mode
    if not stat.filemode(statr) == '-rw-------':
        cleanup_and_die('the permissions for the private key at {} are too loose'.format(config['ssh_private_key']))
    try:
        pKey = paramiko.RSAKey.from_private_key_file(config['ssh_private_key'])
    except paramiko.ssh_exception.PasswordRequiredException:
        cleanup_and_die("the private key '{}' requires a password, as of now this is not supported, please us a private key without password".format(config['ssh_private_key']))
    except paramiko.ssh_exception.SSHException:
        cleanup_and_die("the private key '{}' does not seem to be an RSA key in PEM and can not be supported (use 'ssh-keygen -t rsa -m pem')".format(config['ssh_private_key']))
else:
    if vars(args).get('create_private_key'):
        # Generate a fresh 2048-bit RSA keypair and write both halves.
        logger.info('missing private key, creating')
        if not os.path.exists(os.path.dirname(config['ssh_private_key'])):
            os.mkdir(os.path.dirname(config['ssh_private_key']))
            os.chmod(os.path.dirname(config['ssh_private_key']), stat.S_IRWXU)
        pKey = paramiko.RSAKey.generate(2048)
        pKey.write_private_key_file(config['ssh_private_key'])
        with open(config['ssh_private_key']+'.pub', 'w') as pubKeyFileFD:
            pubKeyFileFD.write('ssh-rsa '+pKey.get_base64())
        # Re-load from disk so pKey matches exactly what was written.
        pKey = paramiko.RSAKey.from_private_key_file(config['ssh_private_key'])
    else:
        cleanup_and_die("missing private key")
# API credentials: environment variables win unless overridden on the CLI.
if os.getenv('DIGITALOCEAN_API_KEY', False) and not args.digitalocean_api_key:
    logger.info('using digitalocean api key from environment')
    cloudkeys['digitalocean'] = os.getenv('DIGITALOCEAN_API_KEY', False)
elif vars(args).get('digitalocean_api_key'):
    logger.debug("using digitalocean api key from arguments")
    cloudkeys['digitalocean'] = args.digitalocean_api_key
if os.getenv('GCLOUD_API_KEY_FILE', False) and not args.gcloud_api_key_file:
    logger.info('using GCloud api key file from environment')
    cloudkeys['gcloud'] = os.getenv('GCLOUD_API_KEY_FILE', False)
elif vars(args).get('gcloud_api_key_file'):
    logger.debug("using GCloud api key file from arguments")
    cloudkeys['gcloud'] = args.gcloud_api_key_file
# Provider-specific adjustments of the generic defaults.
if config['target'] == 'gcloud':
    if config['size'] == argparser.get_default('size'):
        logger.debug('changing default size to "g1-small" to work for gcloud')
        config['size'] = 'g1-small'
if config['target'] == 'sporestack':
    sporestack_regions = ['random', 'sfo2', 'nyc1', 'nyc3', 'tor1', 'lon1', 'ams3', 'fra1', 'sgp1', 'blr1']
    if vars(args).get('region'):
        if config['region'] not in sporestack_regions:
            cleanup_and_die('the requested region "{}" is not amongst the available regions\n{}'.format(config['region'], sporestack_regions))
        if config['region'] == 'random':
            config['region'] = None
    if vars(args).get('image'):
        logger.info('image was set on the cli, but with sporestack we only support ubuntu 16.04')
    if vars(args).get('size'):
        logger.info('size was set on the cli, but with sporestack we only support the 1GB default size')
logger.info("validating settings")
def gcloud_wait(gce_manager, zone, operation, max_tries=60):
    """Poll a GCE zone operation roughly once per second until it is DONE.

    Parameters:
        gce_manager: googleapiclient compute service object.
        zone: zone name the operation runs in.
        operation: operation name to poll.
        max_tries: maximum number of polls before giving up (default 60,
            matching the previous hard-coded bound).

    Returns:
        The final operation resource, or None when max_tries is exhausted
        (previously this timeout was silent; it is now logged).

    Raises:
        Exception: if the finished operation reports an 'error' field.
    """
    logger.debug("waiting for operation '{}'".format(operation))
    for _ in range(max_tries):
        result = gce_manager.zoneOperations().get(project=config['gcloud_project_id'], zone=zone, operation=operation).execute()
        if result['status'] == 'DONE':
            logger.debug("done waiting for operation '{}'".format(operation))
            if 'error' in result:
                raise Exception(result['error'])
            return result
        time.sleep(1)
    logger.warning("timed out waiting for operation '{}'".format(operation))
    return None
def printProgressBar(iteration, total=24, length=100, fill='█'):
    """
    Call in a loop to create a terminal progress bar.

    (The original placed this text after the first statement, so it was a
    plain expression rather than a docstring; it now documents the function.)

    @params:
        iteration - Required : current iteration (Int)
        total     - Optional : total iterations (Int)
        length    - Optional : character length of bar (Int)
        fill      - Optional : bar fill character (Str)
    """
    # Suppress all progress output in quiet mode.
    if vars(args).get('quiet'):
        return
    percent = "{0:.0f}".format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print('\r%s |%s| %s%% %s' % ('Progress:', bar, percent, 'Complete'), end='\r')
    # Print New Line on Complete
    if iteration == total:
        print()
def validate_gcloud():
    """Validate GCloud credentials, project, zone, machine type and image.

    Side effects: fills in config['gcloud_project_id'], may replace
    config['region'] / config['size'] / config['image'] with resolved
    values, deletes a name-clashing instance when --force is set, and
    advances the progress bar. Dies via cleanup_and_die() on any failure.

    Returns:
        A ready-to-use googleapiclient compute service object.
    """
    if not 'gcloud' in cloudkeys:
        cleanup_and_die('can not find GCloud API key file, please supply via CLI, config file or environment variable "GCLOUD_API_KEY"')
    if not config['gcloud_project_id']:
        # Fall back to the project id embedded in the service-account key file.
        with open(cloudkeys['gcloud']) as gcloud_json_file:
            gcloud_json = json.load(gcloud_json_file)
            if gcloud_json['project_id']:
                config['gcloud_project_id'] = gcloud_json['project_id']
                logger.info("extraced GCloud project ID from key file")
            else:
                cleanup_and_die("missing GCloud project ID, please look up or create a project in the GCloud console")
    try:
        gc_credentials = service_account.Credentials.from_service_account_file(cloudkeys['gcloud'])
    except FileNotFoundError:
        cleanup_and_die("GCloud API Key file was not found")
    except (UnicodeDecodeError,json.decoder.JSONDecodeError, ValueError):
        cleanup_and_die("GCloud API Key file has to be json format service account credential file")
    gce_manager = googleapiclient.discovery.build('compute', 'v1', credentials=gc_credentials, cache_discovery=False)
    try:
        # Lists instances across all zones; also serves as a credentials check.
        raw_all_instances = gce_manager.instances().aggregatedList(project=config['gcloud_project_id']).execute()
    except googleapiclient.errors.HttpError as Message:
        reason = json.loads(Message.content).get('error').get('message')
        if reason.startswith('Failed to find project'):
            cleanup_and_die("GCloud Project not found, please specify full id (e.g. some-name-1234)")
        else:
            cleanup_and_die("Error querying instances, please check project ID and key/service account permissions:\n{}".format(Message))
    printProgressBar(1)
    # Check every zone for an instance already using the requested name.
    for key in raw_all_instances['items'].keys():
        if 'warning' in raw_all_instances['items'][key]:
            if raw_all_instances['items'][key]['warning']['code'] == 'NO_RESULTS_ON_PAGE':
                continue
        for instance in raw_all_instances['items'][key]['instances']:
            if instance['name'] == config['name']:
                logger.warning('the requested name "{}" is already taken by an instance in the zone {}'.format(config['name'], key))
                if vars(args).get('force'):
                    logger.warning('force option is set, calling delete() on existing instance')
                    try:
                        # 'key' looks like 'zones/<zone>'; strip the prefix for the API call.
                        operation = gce_manager.instances().delete(project=config['gcloud_project_id'], zone=key.replace('zones/', ''), instance=config['name']).execute()
                        gcloud_wait(gce_manager, key.replace('zones/', ''), operation['name'])
                    except Exception as Message:
                        cleanup_and_die('got exception trying to delete existing instance: "{}"'.format(Message))
                else:
                    exit(1)
    printProgressBar(2)
    # Resolve the zone ('region' in this script's vocabulary).
    raw_regions = gce_manager.zones().list(project=config['gcloud_project_id']).execute()
    regions = list()
    for region in raw_regions['items']:
        regions.append(region['name'])
    regions.sort()
    if config['region'] == argparser.get_default('region'):
        config['region'] = secrets.choice(regions)
        logger.debug("default region selected, selecting random region '{}'".format(config['region']))
    if not config['region'] in regions:
        cleanup_and_die('the requested zone "{}" is not amongst the available zones\n{}'.format(config['region'], regions))
    printProgressBar(3)
    # Validate the machine type against what the chosen zone offers.
    raw_machine_types = gce_manager.machineTypes().list(project=config['gcloud_project_id'], zone=config['region']).execute()
    machine_types = list()
    machine_types_pretty = ""
    for machine_type in raw_machine_types['items']:
        machine_types.append(machine_type['name'])
        machine_types_pretty += "{}\t\t{}\n".format(machine_type['name'], machine_type['description'])
    if not config['size'] in machine_types:
        cleanup_and_die('the requested machine type "{}" is not amongst the available machine types for this region\n{}'.format(config['size'], machine_types_pretty))
    printProgressBar(4)
    # Resolve the image: default image name maps to the ubuntu-1604-lts family.
    if config['image'] == argparser.get_default('image'):
        logger.debug('selecting ubuntu 16.04 lts as default for gcloud')
        select_image = gce_manager.images().getFromFamily(project='ubuntu-os-cloud', family='ubuntu-1604-lts').execute()
        config['image'] = select_image['name']
    else:
        raw_images_ubuntu = gce_manager.images().list(project='ubuntu-os-cloud').execute()
        # raw_images_debian = gce_manager.images().list(project='debian-cloud').execute()
        images = list()
        for image_ubuntu in raw_images_ubuntu['items']:
            if 'deprecated' in image_ubuntu:
                if image_ubuntu['deprecated']['state'] == 'DEPRECATED':
                    continue
            images.append(image_ubuntu['name'])
        # for image_debian in raw_images_debian['items']:
        #     if 'deprecated' in image_debian:
        #         if image_debian['deprecated']['state'] == 'DEPRECATED':
        #             continue
        #     images.append(image_debian['name'])
        images.sort()
        if config['image'] not in images:
            cleanup_and_die("the requested image '{}' is not amongst the available images\n{}".format(config['image'], images))
    return gce_manager
def validate_digitalocean():
    """Validate DigitalOcean credentials, name, region, size and image.

    Side effects: may pick a random region into config['region'], destroys
    a name-clashing droplet when --force is set, handles --destroy (which
    always terminates the script), and advances the progress bar. Dies via
    cleanup_and_die() on any validation failure.

    Returns:
        A ready-to-use digitalocean.Manager.
    """
    if not 'digitalocean' in cloudkeys:
        cleanup_and_die('can not find digitalocean api key, please supply via CLI, config file or environment variable "DIGITALOCEAN_API_KEY"')
    do_manager = digitalocean.Manager(token=cloudkeys['digitalocean'])
    try:
        # Also serves as an API-token check.
        raw_instances = do_manager.get_data("droplets/")
    except (digitalocean.DataReadError,digitalocean.TokenError) as Message:
        cleanup_and_die('got exception connecting to cloud provider "{}"'.format(Message))
    printProgressBar(1)
    instances = list()
    for instance in raw_instances['droplets']:
        instances.append(instance['name'])
    if config['name'] in instances and not config['destroy']:
        logger.warning('the requested name "{}" is already taken'.format(config['name']))
        if vars(args).get('force'):
            # Re-fetch to get ids, then destroy the first droplet with our name.
            raw_instances = do_manager.get_data("droplets/")
            for instance in raw_instances['droplets']:
                if instance['name'] == config['name']:
                    existing_instance = do_manager.get_droplet(instance['id'])
                    logger.warning('force option is set, calling destroy() on existing instance id {}'.format(existing_instance.id))
                    existing_instance.destroy()
                    break
        else:
            exit(1)
    if vars(args).get('destroy'):
        # --destroy mode: delete the named droplet and stop; never returns.
        raw_instances = do_manager.get_data("droplets/")
        for instance in raw_instances['droplets']:
            if instance['name'] == config['name']:
                existing_instance = do_manager.get_droplet(instance['id'])
                existing_instance.destroy()
                cleanup_and_die("destroyed instance id {}, aborting".format(instance['id']))
        cleanup_and_die("no instance with name {} found, aborting".format(config['name']))
    printProgressBar(2)
    raw_regions = do_manager.get_data("regions/")
    regions = list()
    for region in raw_regions['regions']:
        regions.append(region['slug'])
    if config['region'] == argparser.get_default('region'):
        config['region'] = secrets.choice(regions)
        logger.debug("no region selected, selecting random region '{}'".format(config['region']))
    # Verify the requested size is offered in the chosen region.
    for region in raw_regions['regions']:
        if region['slug'] == config['region']:
            if not config['size'] in region['sizes']:
                cleanup_and_die('the requested size "{}" is not amongst the available sizes for this region\n{}'.format(config['size'], region['sizes']))
    if not config['region'] in regions:
        cleanup_and_die('the requested region "{}" is not amongst the available regions\n{}'.format(config['region'], regions))
    printProgressBar(3)
    raw_images = do_manager.get_data("images/")
    images = list()
    for image in raw_images['images']:
        images.append(image['slug'])
    if not config['image'] in images:
        cleanup_and_die('the requested image "{}" is not amongst the available images\n{}'.format(config['image'], images))
    return do_manager
printProgressBar(0)
if args.target == 'digitalocean':
do_manager = validate_digitalocean()
printProgressBar(4)
raw_keys = do_manager.get_data("account/keys/")
keys_fingerprints = list()
for key in raw_keys['ssh_keys']:
keys_fingerprints.append(key['fingerprint'])
if not keyToFingerprint(pKey.get_base64()) in keys_fingerprints:
logger.info('adding ssh key')
addKey = digitalocean.SSHKey(token=cloudkeys['digitalocean'],
name='key-uploaded-by-investiGator',
public_key='ssh-rsa '+pKey.get_base64()
)
addKey.create()
printProgressBar(5)
logger.info("creating instance")
all_keys = do_manager.get_all_sshkeys()
droplet = digitalocean.Droplet(
token=cloudkeys['digitalocean'],
name=config['name'],
region=config['region'],
image=config['image'],
size_slug=config['size'],
ssh_keys=all_keys
)
printProgressBar(6)
droplet.create()
logger.info("waiting for instance to come online")
actions = droplet.get_actions()
for action in actions:
action.load()
action.wait()
if action.status == 'in-progress':
logger.debug("droplet not yet up, waiting some more")
actions = droplet.get_actions()
for action in actions:
action.load()
action.wait()
droplet.load()
if droplet.status != 'active' and droplet.status != 'new':
cleanup_and_die('something went wrong creating the instance, the status is "{}"'.format(droplet.status))
waitForNetworkingCount = 0
waitForNetworkingMax = 10
while droplet.ip_address is None and waitForNetworkingCount < waitForNetworkingMax:
waitForNetworkingCount += 1
logger.debug("the droplet does not report back an external IP, waiting {}/{}".format(waitForNetworkingCount, waitForNetworkingMax))
time.sleep(2)
droplet.load()
instance_ip = droplet.ip_address
if instance_ip is None:
print(vars(droplet))
cleanup_and_die('something went wrong, the droplet does not report back an external IP: {}'.format(vars(droplet)))
logger.info("instance with id {} has external IP {}".format(droplet.id, instance_ip))
printProgressBar(7)
try:
addKey
logger.critical("calling destroy() on added ssh key object fingerprint {} and name {}".format(addKey.fingerprint, addKey.name))
addKey.destroy()
del addKey
except NameError:
pass
elif args.target == 'gcloud':
gce_manager = validate_gcloud()
printProgressBar(5)
if vars(args).get('destroy'):
# operation = gce_manager.instances().delete(project=config['gcloud_project_id'], zone=config['region'], instance=config['name']).execute()
# gcloud_wait(gce_manager, operation['name'])
cleanup_and_die("destroyed instances, aborting")
try:
check_image = gce_manager.images().get(project='ubuntu-os-cloud', image=config['image']).execute()
except Exception as Message:
cleanup_and_die('image invalid: "{}"\nvalid images:\n'.format(Message, images))
printProgressBar(6)
source_disk_image = check_image['selfLink']
logger.info("creating instance")
machine_type = "zones/{}/machineTypes/{}".format(config['region'], config['size'])
instance_config = {
'name': config['name'],
'machineType': machine_type,
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': source_disk_image,
}
}
],
# Specify a network interface with NAT to access the public
# internet.
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [
{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
]
}],
# Allow the instance to access cloud storage and logging.
'serviceAccounts': [{
'email': 'default',
'scopes': [
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/logging.write'
]
}],
'metadata': {
'items': [{
'key': 'ssh-keys',
'value': 'root:ssh-rsa '+pKey.get_base64()+' root'
}]
}
}
printProgressBar(7)
operation_create = gce_manager.instances().insert(project=config['gcloud_project_id'], zone=config['region'], body=instance_config).execute()
logger.info("waiting for instance to come online")
status = gcloud_wait(gce_manager, config['region'], operation_create['name'])
if status['status'] != "DONE":
cleanup_and_die('something went wrong creating the instance: "{}"'.format(error))
instance = gce_manager.instances().get(project=config['gcloud_project_id'], zone=config['region'], instance=config['name']).execute()
instance_ip = instance['networkInterfaces'][0]['accessConfigs'][0]['natIP']
logger.info("instance with id {} has external IP {}".format(instance['id'], instance_ip))
printProgressBar(8)
time.sleep(5)
elif args.target == 'manual':
if not config['instance_ip']:
cleanup_and_die("instance IP has to be specified in manual mode")
try:
instance_ip = ipaddress.ip_address(socket.gethostbyname(config['instance_ip'])).compressed
except ValueError:
cleanup_and_die("invalid IP specified \"{}\"".format(config['instance_ip']))
logger.info("setting up existing instance on IP {}".format(instance_ip))
elif args.target == 'sporestack':
if vars(args).get('destroy'):
if sporestack.client.machine_exists(config['name']):
machine_info = sporestack.client.get_machine_info(config['name'])
sporestack.client.delete(config['name'], sporestack.client.API_ENDPOINT)
cleanup_and_die("destroyed instance id {}, aborting".format(machine_info['machine_id']))
else:
cleanup_and_die("no instance with name {} found, aborting".format(config['name']))
if sporestack.client.machine_exists(config['name']):
machine_info = sporestack.client.get_machine_info(config['name'])
logger.warning('the requested name "{}" is already taken'.format(config['name']))
if vars(args).get('force'):
sporestack.client.delete(config['name'], sporestack.client.API_ENDPOINT)
logger.warning('force option is set, calling destroy() on existing instance id {}'.format(machine_info['machine_id']))
else:
exit(1)
try:
instance_dict = sporestack.client.launch(config['name'],
config['sporestack_days'], # days,
5, # disk,
1, # memory,
'/32', # ipv4,
'/128', # ipv6,
1, # bandwidth,
'digitalocean.sporestack.com', # host=None,
sporestack.client.API_ENDPOINT, # api_endpoint=API_ENDPOINT,
1, # cores=1,
config['sporestack_currency'], # currency='bch',
config['region'], # region=None,
False, # managed=False,
None, # organization=None,
None, # override_code=None,
None, # settlement_token=None,
None, # qemuopts=None,
False, # hostaccess=False,
None, # ipxescript=None,
False, # ipxescript_stdin=False,
None, # ipxescript_file=None,
'ubuntu-16-04', # operating_system=None,
'ssh-rsa '+pKey.get_base64(), # ssh_key=None,
None, # ssh_key_file=None,
None, # walkingliberty_wallet=None,
False, # want_topup=False,
False, # save=True):
)
instance_ip = ipaddress.ip_address(instance_dict['network_interfaces'][0]['ipv4']).compressed
except Exception as msg:
cleanup_and_die('something went wrong while talking to the sporestack API: "{}"'.format(msg))
else:
cleanup_and_die("no target specified")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
logger.info("connecting to instance via SSH")
i = 1;
while True:
try:
ssh.connect(instance_ip, config['ssh_port'], config['user'], None, pKey, None, 15)
break
except paramiko.ssh_exception.AuthenticationException:
if vars(args).get('ssh_wait_for_auth'):
logger.debug("authentication failed with given private key, retrying")
i += 1
time.sleep(1)
else:
cleanup_and_die("Authentication failed with given private key, please ensure public was properly set")
except (paramiko.ssh_exception.NoValidConnectionsError):
logger.debug("connection failed, retrying")
i += 1
time.sleep(1)
except (TimeoutError):
logger.debug("timeout connecting, retrying")
i += 1
time.sleep(1)
except Exception as e:
if str(e) == 'timed out':
logger.debug("timeout connecting, retrying")
i += 1
time.sleep(1)
else:
cleanup_and_die("Exception occured while connecting: \"{}\"".format(str(e)))
if i >= config['ssh_connection_tries']:
cleanup_and_die("unable to connect to {} via SSH within time limit of {} seconds".format(instance_ip, config['ssh_connection_tries']))
printProgressBar(9)
logger.info("setting up system")
stdin, stdout, stderr = ssh.exec_command("echo LC_ALL=\"en_US.UTF-8\" >> /etc/default/locale; \
apt-get -q -o DPkg::Lock::Timeout=60 update && apt-get -o DPkg::Lock::Timeout=60 install -yq curl software-properties-common dirmngr")
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
logger.info("setting up docker")
stdin, stdout, stderr = ssh.exec_command("docker -v >/dev/null 2>&1 || curl https://get.docker.com | bash")
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
printProgressBar(10)
stdin, stdout, stderr = ssh.exec_command("docker-compose -v >/dev/null || curl -L \"https://github.com/docker/compose/releases/download/{}/docker-compose-Linux-x86_64\" -o /usr/local/bin/docker-compose && \
chmod +x /usr/local/bin/docker-compose && echo \"alias dc='docker-compose'\" >> ~/.bash_aliases && \
echo \"set expandtab\nset shiftwidth=2\nset softtabstop=2\nset background=dark\nsyntax on\" >> ~/.vimrc".format(config['compose_version']))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
printProgressBar(11)
# Turn the stock Debian/Ubuntu host into a Kali hybrid unless --no-kali was set:
# add the Kali archive key, register the kali-rolling binary and source repos,
# then install the minimal packages needed for the Kali tooling that follows.
if not config['no_kali']:
logger.info('bootstrapping kali')
# --force-overwrite lets Kali packages replace conflicting files from the
# distro's own packages during the crossover install.
stdin, stdout, stderr = ssh.exec_command("curl -fsSL https://archive.kali.org/archive-key.asc | apt-key add - && \
echo \"deb http://http.kali.org/kali kali-rolling main non-free contrib\" > /etc/apt/sources.list.d/kali.list && \
echo \"deb-src http://http.kali.org/kali kali-rolling main non-free contrib\" >> /etc/apt/sources.list.d/kali.list && \
export DEBIAN_FRONTEND=noninteractive; apt-get -q update && \
apt-get -o Dpkg::Options::=\"--force-overwrite\" -yq install console-setup-linux software-properties-common")
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
printProgressBar(12)
else:
logger.debug("not bootstrapping kali")
# Reset the one-shot flag before the config is persisted by write_config
# below -- presumably so a later run bootstraps kali again; confirm.
config['no_kali'] = False
# Install the standard pentest toolset and clone a few standard GitHub repos,
# unless --bare was requested.
if not config['bare']:
standard_tools="nmap git wpscan exploitdb hashcat hydra gobuster crunch lynx seclists wordlists dirb wfuzz"
standard_repos=['magnumripper/JohnTheRipper', 'erwanlr/Fingerprinter', 'laramies/theHarvester']
printProgressBar(13)
logger.info('installing tools "{}"'.format(standard_tools))
# Build/runtime prerequisites for the tools below (ruby/python based tools).
stdin, stdout, stderr = ssh.exec_command("export DEBIAN_FRONTEND=noninteractive; apt-get -yq install {}".format("zlib1g-dev ruby-dev python3-pip"))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
# One apt-get call per tool -- presumably so a single failing package does
# not abort the whole set; each failure is logged individually.
for tool in standard_tools.split():
logger.info('installing "{}"'.format(tool))
stdin, stdout, stderr = ssh.exec_command("export DEBIAN_FRONTEND=noninteractive; apt-get -yq install {}".format(tool))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
printProgressBar(14)
# Clone each standard repo into the remote user's current directory.
for repo in standard_repos:
logger.info('installing repo "{}"'.format(repo))
stdin, stdout, stderr = ssh.exec_command("git clone https://github.com/{}.git".format(repo))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
printProgressBar(15)
else:
logger.debug("not installing standard tools and repos, bare instance")
# Reset the one-shot flag before write_config persists the config below.
config['bare'] = False
# Install any additional apt packages requested on the command line (--tool).
if vars(args).get('tool'):
for tool in config['tool']:
logger.info('installing additional tool "{}"'.format(tool))
stdin, stdout, stderr = ssh.exec_command("export DEBIAN_FRONTEND=noninteractive; apt-get -yq install {}".format(tool))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
# Clear the list so write_config does not persist it for future runs.
config['tool'] = False
printProgressBar(16)
# Clone any additional GitHub repos requested on the command line (--repo),
# given as "owner/name" slugs.
if vars(args).get('repo'):
for repo in config['repo']:
logger.info('installing additional repo "{}"'.format(repo))
stdin, stdout, stderr = ssh.exec_command("git clone https://github.com/{}.git".format(repo))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
# Clear the list so write_config does not persist it for future runs.
config['repo'] = False
printProgressBar(17)
# Install any requested cryptocurrency wallets (--wallet); currently only
# 'monero' is handled -- the official CLI tarball is downloaded and unpacked.
if vars(args).get('wallet'):
for item in config['wallet']:
logger.info('installing wallet {}'.format(item))
if item == 'monero':
stdin, stdout, stderr = ssh.exec_command("curl -L -o linux64.tar.bz2 https://downloads.getmonero.org/cli/linux64 && tar xf linux64.tar.bz2 && rm linux64.tar.bz2")
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
printProgressBar(18)
# Print usage hint for restoring a wallet against a remote node.
print("monero wallet installed, to use with external node and restore from keys, run:\n## ATTENTION: using a remote node might compromise your privacy!\ncd ~/monero-x86_64-linux-gnu; ./monero-wallet-cli --daemon-address node.moneroworld.com:18089 --generate-from-keys restored-wallet")
# Set up each requested service (--service). Each `if service == ...` branch
# below is one supported service; this first one deploys an IPSec VPN server.
if vars(args).get('service'):
for service in config['service']:
logger.info('installing service {}'.format(service))
if service == 'ipsec':
ssh.exec_command("mkdir -p /root/vpn")
# Upload the compose file (ipsec_vpn_compose, defined elsewhere in the
# script) and bring the stack up detached.
sftp = ssh.open_sftp()
sftp.put(ipsec_vpn_compose, "/root/vpn/"+ipsec_vpn_compose)
sftp.close()
stdin, stdout, stderr = ssh.exec_command("cd /root/vpn && /usr/local/bin/docker-compose -f {} up -d".format(ipsec_vpn_compose))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
printProgressBar(19)
print("IPSec VPN Server set up at "+instance_ip)
if service == 'shadowsocks':
    # Deploy a ShadowSocks server container, protected by a random password.
    # Prefer a cryptographically secure 16-char alphanumeric password from
    # the `secrets` module; fall back to a predictable sha256-of-timestamp
    # only if the modules are unavailable.
    try:
        # BUGFIX: was `import strings` (no such module), which always raised
        # and silently forced the insecure fallback; the code below uses
        # `string.ascii_letters`, so the correct module is `string`.
        import string
        import secrets
        alphabet = string.ascii_letters + string.digits
        shadowsocks_password = ''.join(secrets.choice(alphabet) for i in range(16))
    except (ImportError, ModuleNotFoundError):
        logger.warning("string and secrets module not found, falling back to insecure password generation")
        shadowsocks_password = hashlib.sha256(time.asctime().encode('utf-8')).hexdigest()
    # Expose the container's 8388 on host port 443 (tcp+udp) to blend with HTTPS.
    stdin, stdout, stderr = ssh.exec_command("docker run -e PASSWORD={} -e METHOD=aes-256-gcm -p443:8388 -p443:8388/udp -d shadowsocks/shadowsocks-libev".format(shadowsocks_password))
    logger.debug("".join(stdout.readlines()))
    if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
    printProgressBar(20)
    # Print the ready-to-use client command line.
    print("ShadowSocks Server set up at {}, on the client install using apt 'apt install shadowsocks-libev'".format(instance_ip))
    print("\n# ss-local -l 1080 -m aes-256-gcm -s {} -p {} -k {}\n".format(instance_ip, 443, shadowsocks_password))
if service == 'proxy':
    # Deploy a SOCKS5 proxy container with a random password for user 'user'.
    # Prefer a cryptographically secure 16-char alphanumeric password from
    # the `secrets` module; fall back to a predictable sha256-of-timestamp
    # only if the modules are unavailable.
    try:
        # BUGFIX: was `import strings` (no such module), which always raised
        # and silently forced the insecure fallback; the code below uses
        # `string.ascii_letters`, so the correct module is `string`.
        import string
        import secrets
        alphabet = string.ascii_letters + string.digits
        proxy_password = ''.join(secrets.choice(alphabet) for i in range(16))
    except (ImportError, ModuleNotFoundError):
        logger.warning("string and secrets module not found, falling back to insecure password generation")
        proxy_password = hashlib.sha256(time.asctime().encode('utf-8')).hexdigest()
    stdin, stdout, stderr = ssh.exec_command("docker run -e PROXY_PASSWORD={} -e PROXY_USER=user -p{}:{} -d serjs/go-socks5-proxy".format(proxy_password, 1080, 1080))
    logger.debug("".join(stdout.readlines()))
    if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
    printProgressBar(21)
    # Print a ready-to-paste proxychains config line.
    print("Proxy Server set up at {}, use in proxychains like this:".format(instance_ip))
    print("\nsocks5 {} {} user {}".format(instance_ip, 1080, proxy_password))
# Deploy a WireGuard VPN: install the package, upload and run the config
# script (wireguard_config_script, defined elsewhere), bring up wg0, then
# fetch the generated client config and print setup instructions.
if service == 'wireguard':
stdin, stdout, stderr = ssh.exec_command("export DEBIAN_FRONTEND=noninteractive; apt-get -yq install wireguard")
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
sftp = ssh.open_sftp()
sftp.put(wireguard_config_script, "/root/"+wireguard_config_script)
sftp.close()
# The script receives the public IP and port and writes ~/wg0.conf.
stdin, stdout, stderr = ssh.exec_command("bash {} {} {} && wg-quick up wg0".format("/root/"+wireguard_config_script, instance_ip, wireguard_port))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
stdin, stdout, stderr = ssh.exec_command("cat ~/wg0.conf")
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
# Rebinds `stdout` from the paramiko channel file to the captured text.
stdout = "".join(stdout.readlines())
logger.debug(stdout)
wireguard_client_config = stdout
printProgressBar(22)
print("on the client install wireguard, save the config and run wg-quick up wg0 as root")
print("\n####\n\nOn Ubuntu run:\nsudo apt-get -yq install software-properties-common && sudo add-apt-repository -yu ppa:wireguard/wireguard && sudo apt-get -yq install wireguard")
print("\ncat << 'EOF' | sudo tee /etc/wireguard/wg0.conf\n{}\nEOF".format(wireguard_client_config))
print("\nsudo wg-quick up wg0")
# Deploy an SSH pivot container (docker-socks) with host port 8022 mapped to
# the container's SSH port 22, then show its startup log (which the printed
# message presents as the connection info).
if service == 'ssh-pivot':
stdin, stdout, stderr = ssh.exec_command("docker run --name {} -d -p{}:22 nikosch86/docker-socks".format("ssh-pivot", 8022))
logger.debug("".join(stdout.readlines()))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
stdin, stdout, stderr = ssh.exec_command("docker logs {} 2>&1".format("ssh-pivot"))
if stdout.channel.recv_exit_status() > 0: logger.critical("STDERR of setup command: {}".format(stderr.read()))
# Rebinds `stdout` from the paramiko channel file to the captured text.
stdout = "".join(stdout.readlines())
logger.debug(stdout)
ssh_pivot_out = stdout
printProgressBar(23)
print("SSH Pivot Server set up, stdout:\n{}".format(ssh_pivot_out))
print("\nSSH socks can be used in proxychains like this:\nsocks5 127.0.0.1 1080")
# Placeholder for a not-yet-implemented openvpn service branch.
#
# if service == 'openvpn':
# No services requested: jump the progress bar straight to the final step.
else:
printProgressBar(24)
# Tear down the SSH session and persist the (possibly reset) config flags.
ssh.close()
write_config(config, configfile)
# Quiet mode prints only the IP (script-friendly); otherwise print the full
# ready-to-paste ssh command with key, port and user.
if vars(args).get('quiet'):
print("{}".format(instance_ip))
else:
print("\n###\n# ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p{} -i {} {}@{}".format(config['ssh_port'], config['ssh_private_key'], config['user'], instance_ip))