diff --git a/.github/workflows/test_PRs.yml b/.github/workflows/test_PRs.yml
new file mode 100644
index 0000000..a4753e4
--- /dev/null
+++ b/.github/workflows/test_PRs.yml
@@ -0,0 +1,45 @@
+# This workflow is designed to run through the process of installing, building, and executing
+# basic PMapper unittests against PMapper's supported versions when there's a new PR aiming
+# at the "master" branch
+
+name: "Test Against Pythons"
+
+on:
+ pull_request:
+ branches: [ master ]
+ workflow_dispatch:
+permissions:
+ actions: read
+ issues: write
+ contents: read
+ discussions: write
+
+jobs:
+ build_and_test:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: ["ubuntu-latest", "windows-latest", "macos-latest"]
+ python-version: ["3.6", "3.10"]
+ steps:
+ - name: "Grab Code"
+ uses: actions/checkout@v2
+
+ - name: "Install Python"
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: "Install PMapper"
+ shell: bash
+ working-directory: ${{ github.workspace }}
+ run: |
+ pip install .
+ pip show principalmapper
+
+ - name: "Run Test Cases"
+ shell: bash
+ working-directory: ${{ github.workspace }}
+ run: |
+ python -m unittest -v tests/test*
diff --git a/examples/graph_from_cf_template.py b/examples/graph_from_cf_template.py
index 028a01f..4217a47 100644
--- a/examples/graph_from_cf_template.py
+++ b/examples/graph_from_cf_template.py
@@ -144,7 +144,7 @@ def main():
edges = iam_edges.generate_edges_locally(nodes) + sts_edges.generate_edges_locally(nodes)
# Create our graph and finish
- graph = Graph(nodes, edges, policies, groups, metadata)
+ graph = Graph(nodes, edges, policies, groups, '000000000000', 'aws', metadata)
graph_actions.print_graph_data(graph)
diff --git a/principalmapper/__init__.py b/principalmapper/__init__.py
index c8764d3..4443b7a 100644
--- a/principalmapper/__init__.py
+++ b/principalmapper/__init__.py
@@ -15,4 +15,4 @@
# You should have received a copy of the GNU Affero General Public License
# along with Principal Mapper. If not, see .
-__version__ = '1.1.5'
+__version__ = '1.2.0'
diff --git a/principalmapper/__main__.py b/principalmapper/__main__.py
index 2a4e669..1f372b8 100644
--- a/principalmapper/__main__.py
+++ b/principalmapper/__main__.py
@@ -21,6 +21,7 @@
import logging
import sys
+import principalmapper
from principalmapper.analysis import cli as analysis_cli
from principalmapper.graphing import graph_cli
from principalmapper.graphing import orgs_cli
@@ -49,6 +50,11 @@ def main() -> int:
action='store_true',
help='Produces debug-level output of the underlying Principal Mapper library during execution.'
)
+ argument_parser.add_argument(
+ '--version',
+ action='version',
+ version=f'Principal Mapper v{principalmapper.__version__}'
+ )
# Create subparser for various subcommands
subparser = argument_parser.add_subparsers(
diff --git a/principalmapper/analysis/find_risks.py b/principalmapper/analysis/find_risks.py
index 9621439..4e94611 100644
--- a/principalmapper/analysis/find_risks.py
+++ b/principalmapper/analysis/find_risks.py
@@ -55,7 +55,8 @@ def gen_report(graph: Graph) -> Report:
"""Generates a Report object with findings and metadata about report-generation"""
findings = gen_all_findings(graph)
return Report(
- graph.metadata['account_id'],
+ graph.account,
+ graph.partition,
dt.datetime.now(dt.timezone.utc),
findings,
'Findings identified using Principal Mapper ({}) from NCC Group: https://github.com/nccgroup/PMapper'.format(
@@ -475,7 +476,7 @@ def gen_resources_with_potential_confused_deputies(graph: Graph) -> List[Finding
for action in action_list:
rpa_result = resource_policy_authorization(
service,
- graph.metadata['account_id'],
+ graph.account,
policy.policy_doc,
action,
policy.arn,
@@ -523,7 +524,10 @@ def print_report(report: Report) -> None:
print('----------------------------------------------------------------')
print('# Principal Mapper Findings')
print()
- print('Findings identified in AWS account {}'.format(report.account))
+ if report.partition == 'aws':
+ print('Findings identified in AWS account {}'.format(report.account))
+ else:
+ print(f'Findings identified in AWS account {report.account} ({report.partition})')
print()
print('Date and Time: {}'.format(report.date_and_time.isoformat()))
print()
diff --git a/principalmapper/analysis/report.py b/principalmapper/analysis/report.py
index 6bcee70..598debb 100644
--- a/principalmapper/analysis/report.py
+++ b/principalmapper/analysis/report.py
@@ -26,8 +26,9 @@ class Report:
utility function to convert the contents of the report to a dictionary object.
"""
- def __init__(self, account: str, date_and_time: dt.datetime, findings: List[Finding], source: str):
+ def __init__(self, account: str, partition: str, date_and_time: dt.datetime, findings: List[Finding], source: str):
self.account = account
+ self.partition = partition
self.date_and_time = date_and_time
self.findings = findings
self.source = source
@@ -36,6 +37,7 @@ def as_dictionary(self) -> dict:
"""Produces a dictionary representing this Report's contents."""
return {
'account': self.account,
+ 'partition': self.partition,
'date_and_time': self.date_and_time.isoformat(),
'findings': [x.as_dictionary() for x in self.findings],
'source': self.source
diff --git a/principalmapper/common/graphs.py b/principalmapper/common/graphs.py
index a6ff383..50cdacb 100644
--- a/principalmapper/common/graphs.py
+++ b/principalmapper/common/graphs.py
@@ -40,21 +40,34 @@ class Graph(object):
Graph data to/from files stored on-disk. The actual attributes of each graph/node/edge/policy/group object
will remain the same across the same major+minor version of Principal Mapper, so a graph generated in v1.0.0
should be loadable in v1.0.1, but not v1.1.0.
+
+ * **v1.2.0:** Shifted account/partition to arguments of Graph construction
"""
- def __init__(self, nodes: list = None, edges: list = None, policies: list = None, groups: list = None,
- metadata: dict = None):
+ def __init__(self, nodes: list, edges: list, policies: list, groups: list, account: str, partition: str,
+ metadata: dict):
"""Constructor"""
- for arg, value in {'nodes': nodes, 'edges': edges, 'policies': policies, 'groups': groups,
- 'metadata': metadata}.items():
- if value is None:
- raise ValueError('Required constructor argument {} was None'.format(arg))
+
+        for argname, argval in (('nodes', nodes), ('edges', edges), ('policies', policies), ('groups', groups), ('account', account), ('partition', partition), ('metadata', metadata)):
+            if argval is None:
+                raise ValueError(f'Required argument {argname} was None')
+
self.nodes = nodes
self.edges = edges
self.policies = policies
self.groups = groups
- if 'account_id' not in metadata:
- raise ValueError('Incomplete metadata input, expected key: "account_id"')
+
+ self.account = account
+ if not isinstance(account, str):
+ raise ValueError('Parameter `account` is expected to be str')
+
+ self.partition = partition
+ if not isinstance(partition, str):
+ raise ValueError('Parameter `partition` is expected to be str')
+
+ if not isinstance(metadata, dict):
+ raise ValueError('Parameter `metadata` is expected to be dict')
+
if 'pmapper_version' not in metadata:
raise ValueError('Incomplete metadata input, expected key: "pmapper_version"')
self.metadata = metadata
@@ -73,6 +86,7 @@ def store_graph_as_json(self, root_directory: str):
Structure:
|
+ |---- data.json
|---- metadata.json
|---- graph/
|-------- nodes.json
@@ -88,6 +102,7 @@ def store_graph_as_json(self, root_directory: str):
graphdir = os.path.join(rootpath, 'graph')
if not os.path.exists(graphdir):
os.makedirs(graphdir, 0o700)
+ regulardatafilepath = os.path.join(rootpath, 'data.json')
metadatafilepath = os.path.join(rootpath, 'metadata.json')
nodesfilepath = os.path.join(graphdir, 'nodes.json')
edgesfilepath = os.path.join(graphdir, 'edges.json')
@@ -95,6 +110,8 @@ def store_graph_as_json(self, root_directory: str):
groupsfilepath = os.path.join(graphdir, 'groups.json')
old_umask = os.umask(0o077) # block rwx for group/all
+ with open(regulardatafilepath, 'w') as f:
+ json.dump({'account': self.account, 'partition': self.partition}, f, indent=4)
with open(metadatafilepath, 'w') as f:
json.dump(self.metadata, f, indent=4)
with open(nodesfilepath, 'w') as f:
@@ -113,6 +130,7 @@ def create_graph_from_local_disk(cls, root_directory: str):
Structure:
|
+ |---- data.json
|---- metadata.json
|---- graph/
|-------- nodes.json
@@ -132,6 +150,7 @@ def create_graph_from_local_disk(cls, root_directory: str):
raise ValueError('Did not find file at: {}'.format(rootpath))
graphdir = os.path.join(rootpath, 'graph')
metadatafilepath = os.path.join(rootpath, 'metadata.json')
+ regulardatafilepath = os.path.join(rootpath, 'data.json')
nodesfilepath = os.path.join(graphdir, 'nodes.json')
edgesfilepath = os.path.join(graphdir, 'edges.json')
policiesfilepath = os.path.join(graphdir, 'policies.json')
@@ -142,12 +161,16 @@ def create_graph_from_local_disk(cls, root_directory: str):
current_pmapper_version = packaging.version.parse(principalmapper.__version__)
loaded_graph_version = packaging.version.parse(metadata['pmapper_version'])
- if current_pmapper_version.release[0] != loaded_graph_version.release[0] or \
- current_pmapper_version.release[1] != loaded_graph_version.release[1]:
- raise ValueError('Loaded Graph data was from a different version of Principal Mapper ({}), but the current '
- 'version of Principal Mapper ({}) may not support it. Either update the stored Graph data '
- 'and its metadata, or regraph the account.'.format(loaded_graph_version,
- current_pmapper_version))
+ if current_pmapper_version.major != loaded_graph_version.major or current_pmapper_version.minor != loaded_graph_version.minor:
+ raise ValueError(
+ f'The loaded Graph data came from a different version of Principal Mapper '
+ f'({str(loaded_graph_version)}) that is not compatible with this version of Principal Mapper '
+                f'({str(current_pmapper_version)}). Either update the stored Graph data and its metadata, or '
+                f'regraph the account (`pmapper graph create`).'
+ )
+
+ with open(regulardatafilepath) as f:
+ acctdata = json.load(f) # type: dict
policies = []
with open(policiesfilepath) as f:
@@ -216,4 +239,12 @@ def create_graph_from_local_disk(cls, root_directory: str):
edges.append(Edge(source=source, destination=destination, reason=edge['reason'],
short_reason=edge['short_reason']))
- return Graph(nodes=nodes, edges=edges, policies=policies, groups=groups, metadata=metadata)
+ return Graph(
+ nodes=nodes,
+ edges=edges,
+ policies=policies,
+ groups=groups,
+ account=acctdata.get('account'),
+ partition=acctdata.get('partition'),
+ metadata=metadata
+ )
diff --git a/principalmapper/common/nodes.py b/principalmapper/common/nodes.py
index 551c35c..7e85ac2 100644
--- a/principalmapper/common/nodes.py
+++ b/principalmapper/common/nodes.py
@@ -105,7 +105,8 @@ def get_outbound_edges(self, graph): # -> List[Edge], can't import Edge/Graph i
self.cache['outbound_edges'] = []
if self.is_admin:
for node in graph.nodes:
- if node == self:
+ # skip self-links and links to service-linked roles (not even accessible to admins)
+ if node == self or node.is_service_linked_role():
continue
else:
self.cache['outbound_edges'].append(
@@ -119,6 +120,15 @@ def get_outbound_edges(self, graph): # -> List[Edge], can't import Edge/Graph i
self.cache['outbound_edges'].append(edge)
return self.cache['outbound_edges']
+ def is_service_linked_role(self):
+ if 'is_service_linked_role' not in self.cache:
+ if ':role/' in self.arn:
+ role_name = self.arn.split('/')[-1]
+ self.cache['is_service_linked_role'] = role_name.startswith('AWSServiceRoleFor')
+ else:
+ self.cache['is_service_linked_role'] = False
+ return self.cache['is_service_linked_role']
+
def to_dictionary(self) -> dict:
"""Creates a dictionary representation of this Node for storage."""
_pb = self.permissions_boundary
diff --git a/principalmapper/common/org_trees.py b/principalmapper/common/org_trees.py
index cff72b8..e023a91 100644
--- a/principalmapper/common/org_trees.py
+++ b/principalmapper/common/org_trees.py
@@ -19,6 +19,10 @@
import os.path
from typing import List, Optional, Tuple
+import packaging
+import packaging.version
+
+import principalmapper
from principalmapper.common import Edge
from principalmapper.common.policies import Policy
@@ -86,10 +90,13 @@ def as_dictionary(self) -> dict:
class OrganizationTree(object):
"""The OrganizationGraph object represents an AWS Organization, which is a collection of AWS accounts. These
accounts are organized in a hierarchy (we use a tree for this).
+
+ * **v1.2.0:** Added the required 'partition' field
"""
def __init__(self, org_id: str, management_account_id: str, root_ous: List[OrganizationNode],
- all_scps: List[Policy], accounts: List[str], edge_list: List[Edge], metadata: dict):
+ all_scps: List[Policy], accounts: List[str], edge_list: List[Edge], metadata: dict,
+ partition: str):
self.org_id = org_id
self.management_account_id = management_account_id
self.root_ous = root_ous
@@ -99,6 +106,7 @@ def __init__(self, org_id: str, management_account_id: str, root_ous: List[Organ
if 'pmapper_version' not in metadata:
raise ValueError('The pmapper_version key/value (str) is required: {"pmapper_version": "..."}')
self.metadata = metadata
+ self.partition = partition
def as_dictionary(self) -> dict:
"""Returns a dictionary representation of this OrganizationTree object. Used for serialization to disk. We
@@ -109,7 +117,8 @@ def as_dictionary(self) -> dict:
'management_account_id': self.management_account_id,
'root_ous': [x.as_dictionary() for x in self.root_ous],
'edge_list': [x.to_dictionary() for x in self.edge_list],
- 'accounts': self.accounts
+ 'accounts': self.accounts,
+ 'partition': self.partition
}
def save_organization_to_disk(self, dirpath: str):
@@ -163,6 +172,17 @@ def create_from_dir(cls, dirpath: str):
with open(metadata_filepath) as fd:
metadata_obj = json.load(fd)
+ # verify pmapper_version
+ current_pmapper_version = packaging.version.parse(principalmapper.__version__)
+ loaded_orgtree_version = packaging.version.parse(metadata_obj['pmapper_version'])
+ if current_pmapper_version.major != loaded_orgtree_version.major or current_pmapper_version.minor != loaded_orgtree_version.minor:
+ raise ValueError(
+ f'The loaded organization data came from a different version of Principal Mapper '
+ f'({str(loaded_orgtree_version)}) that is not compatible with this version of Principal Mapper '
+ f'({str(current_pmapper_version)}). You will need to recreate the organization (`pmapper orgs '
+ f'create`).'
+ )
+
# load the OrganizationX objects
org_datafile_path = os.path.join(dirpath, 'org_data.json')
with open(org_datafile_path) as fd:
@@ -188,5 +208,6 @@ def _produce_ou(ou_dict: dict) -> OrganizationNode:
[x for x in policies.values()],
org_dictrepr['accounts'],
org_dictrepr['edge_list'],
- metadata_obj
+ metadata_obj,
+ org_dictrepr['partition']
)
diff --git a/principalmapper/graphing/autoscaling_edges.py b/principalmapper/graphing/autoscaling_edges.py
index 78e5b2e..d2d2adc 100644
--- a/principalmapper/graphing/autoscaling_edges.py
+++ b/principalmapper/graphing/autoscaling_edges.py
@@ -35,7 +35,7 @@ class AutoScalingEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges."""
logger.info('Generating Edges based on EC2 Auto Scaling.')
@@ -48,7 +48,7 @@ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]]
# Gather projects information for each region
autoscaling_clients = []
if self.session is not None:
- as_regions = botocore_tools.get_regions_to_search(self.session, 'autoscaling', region_allow_list, region_deny_list)
+ as_regions = botocore_tools.get_regions_to_search(self.session, 'autoscaling', region_allow_list, region_deny_list, partition)
for region in as_regions:
autoscaling_clients.append(self.session.create_client('autoscaling', region_name=region, **asargs))
@@ -67,14 +67,16 @@ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]]
})
except ClientError as ex:
- logger.warning('Unable to search region {} for launch configs. The region may be disabled, or the error may '
- 'be caused by an authorization issue. Continuing.'.format(as_client.meta.region_name))
- logger.debug('Exception details: {}'.format(ex))
+ logger.warning(
+ f'Unable to search region {as_client.meta.region_name} for launch configs. The region may be '
+ f'disabled, or the error may be caused by an authorization issue. Continuing.'
+ )
+ logger.debug(f'Exception details: {ex}')
result = generate_edges_locally(nodes, scps, launch_configs)
for edge in result:
- logger.info("Found new edge: {}".format(edge.describe_edge()))
+ logger.info(f"Found new edge: {edge.describe_edge()}")
return result
diff --git a/principalmapper/graphing/cloudformation_edges.py b/principalmapper/graphing/cloudformation_edges.py
index b2f867f..7ccf01f 100644
--- a/principalmapper/graphing/cloudformation_edges.py
+++ b/principalmapper/graphing/cloudformation_edges.py
@@ -37,7 +37,7 @@ class CloudFormationEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges."""
logger.info('Pulling data on CloudFormation stacks.')
@@ -50,7 +50,7 @@ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]]
# Grab existing stacks in each region
cloudformation_clients = []
if self.session is not None:
- cf_regions = botocore_tools.get_regions_to_search(self.session, 'cloudformation', region_allow_list, region_deny_list)
+ cf_regions = botocore_tools.get_regions_to_search(self.session, 'cloudformation', region_allow_list, region_deny_list, partition)
for region in cf_regions:
cloudformation_clients.append(self.session.create_client('cloudformation', region_name=region, **cfargs))
diff --git a/principalmapper/graphing/codebuild_edges.py b/principalmapper/graphing/codebuild_edges.py
index a8b8cd2..ffa14cb 100644
--- a/principalmapper/graphing/codebuild_edges.py
+++ b/principalmapper/graphing/codebuild_edges.py
@@ -35,7 +35,7 @@ class CodeBuildEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges."""
logger.info('Generating Edges based on CodeBuild.')
@@ -49,7 +49,7 @@ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]]
codebuild_clients = []
if self.session is not None:
- cf_regions = botocore_tools.get_regions_to_search(self.session, 'codebuild', region_allow_list, region_deny_list)
+ cf_regions = botocore_tools.get_regions_to_search(self.session, 'codebuild', region_allow_list, region_deny_list, partition)
for region in cf_regions:
codebuild_clients.append(self.session.create_client('codebuild', region_name=region, **cbargs))
diff --git a/principalmapper/graphing/cross_account_edges.py b/principalmapper/graphing/cross_account_edges.py
index 511502e..cdcffc1 100644
--- a/principalmapper/graphing/cross_account_edges.py
+++ b/principalmapper/graphing/cross_account_edges.py
@@ -45,7 +45,7 @@ def _check_assume_role(ga, na, gb, nb, scps) -> bool:
conditions['aws:username'] = na.searchable_name().split('/')[1]
conditions['aws:SecureTransport'] = 'true'
- conditions['aws:PrincipalAccount'] = ga.metadata['account_id']
+ conditions['aws:PrincipalAccount'] = ga.account
conditions['aws:PrincipalArn'] = na.arn
if 'org-id' in ga.metadata:
conditions['aws:PrincipalOrgID'] = ga.metadata['org-id']
diff --git a/principalmapper/graphing/datapipeline_edges.py b/principalmapper/graphing/datapipeline_edges.py
new file mode 100644
index 0000000..607dc89
--- /dev/null
+++ b/principalmapper/graphing/datapipeline_edges.py
@@ -0,0 +1,194 @@
+"""Code to identify if a principal in an AWS account can use access to AWS Data Pipeline to access other principals."""
+
+
+# Copyright (c) NCC Group and Erik Steringer 2019. This file is part of Principal Mapper.
+#
+# Principal Mapper is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Principal Mapper is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Principal Mapper. If not, see .
+
+import logging
+from typing import Dict, List, Optional
+
+from botocore.exceptions import ClientError
+
+from principalmapper.common import Edge, Node
+from principalmapper.graphing.edge_checker import EdgeChecker
+from principalmapper.querying import query_interface
+from principalmapper.querying.local_policy_simulation import resource_policy_authorization, ResourcePolicyEvalResult
+from principalmapper.util import arns, botocore_tools
+
+logger = logging.getLogger(__name__)
+
+
+class DataPipelineEdgeChecker(EdgeChecker):
+ """Class for identifying if Data Pipeline can be used by IAM principals to gain access to other IAM principals."""
+
+ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
+ region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
+ """Fulfills expected method return_edges."""
+
+ logger.info('Generating Edges based on Data Pipeline.')
+
+ result = generate_edges_locally(nodes, scps)
+
+ for edge in result:
+ logger.info("Found new edge: {}".format(edge.describe_edge()))
+
+ return result
+
+
+def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] = None) -> List[Edge]:
+ """For Data Pipeline, we do something a little different. The way people can use DataPipeline to pivot is
+ to create a pipeline, then put a definition on the pipeline that creates an EC2 instance resource. The
+ role that's used by the EC2 instance is the ultimate target. This requires:
+
+ * datapipeline:CreatePipeline (resource "*")
+ * datapipeline:PutPipelineDefinition (resource "*")
+ * iam:PassRole for the Data Pipeline Role (which must trust datapipeline.amazonaws.com)
+ * iam:PassRole for the EC2 Data Pipeline Role (which must trust ec2.amazonaws.com and have an instance profile)
+
+ Note that we have two roles involved. Data Pipeline Role, which seems to be a sorta service role but
+ doesn't have the same path/naming convention as other service roles, is used to actually call EC2 and
+ spin up the target instance. It's meant to be accessible to datapipeline.amazonaws.com. Then, we have
+ the EC2 Data Pipeline Role, which actually is accessible to the EC2 instance doing the computational
+ work of the pipeline.
+
+ Other works/blogs seemed to indicate the Data Pipeline Role was accessible, however that might not be true
+ anymore? In any case, recent experimentation only allowed me access to the EC2 Data Pipeline Role.
+
+ To create the list of edges, we gather our:
+
+ * Potential Data Pipeline Roles
+ * Potential EC2 Data Pipeline Roles
+
+ Then we determine which of the EC2 roles are accessible to the Data Pipeline Roles, then run through all
+ potential source nodes to see if they have the correct datapipeline:* + iam:PassRole permissions, then generate
+ edges that have the EC2 roles as destinations.
+
+ This vector is neat because even if specific EC2-accessible roles are blocked via ec2:RunInstances, this might be
+ an alternative option the same as autoscaling was.
+ """
+
+ results = []
+
+ intermediate_node_paths = {}
+ destination_nodes = []
+ for node in nodes:
+ if ':role/' not in node.arn:
+ continue
+
+ rp_result = resource_policy_authorization(
+ 'datapipeline.amazonaws.com',
+ arns.get_account_id(node.arn),
+ node.trust_policy,
+ 'sts:AssumeRole',
+ node.arn,
+ {}
+ )
+
+ if rp_result is ResourcePolicyEvalResult.SERVICE_MATCH:
+ intermediate_node_paths[node] = []
+
+ rp_result = resource_policy_authorization(
+ 'ec2.amazonaws.com',
+ arns.get_account_id(node.arn),
+ node.trust_policy,
+ 'sts:AssumeRole',
+ node.arn,
+ {}
+ )
+
+ if rp_result is ResourcePolicyEvalResult.SERVICE_MATCH and node.instance_profile is not None:
+ destination_nodes.append(node)
+
+ for intermediate_node in intermediate_node_paths.keys():
+ for destination_node in destination_nodes:
+ # if intermediate can run EC2 and pass the role, then add that path for checking
+ if not query_interface.local_check_authorization(
+ intermediate_node,
+ 'ec2:RunInstances',
+ '*',
+ {'ec2:InstanceProfile': destination_node.instance_profile}
+ ):
+ continue
+
+ if query_interface.local_check_authorization(
+ intermediate_node,
+ 'iam:PassRole',
+ destination_node.arn,
+ {'iam:PassedToService': 'ec2.amazonaws.com'}
+ ):
+ intermediate_node_paths[intermediate_node].append(destination_node)
+
+ # now we have the mappings for -> paths
+ for node_source in nodes:
+ if node_source.is_admin:
+ continue
+
+ create_pipeline_auth, cpa_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'datapipeline:CreatePipeline',
+ '*',
+ {},
+ service_control_policy_groups=scps
+ )
+ if not create_pipeline_auth:
+ continue
+
+ put_pipeline_def_auth, ppda_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'datapipeline:PutPipelineDefinition',
+ '*',
+ {},
+ service_control_policy_groups=scps
+ )
+ if not put_pipeline_def_auth:
+ continue
+
+ for intermediate_node in intermediate_node_paths.keys():
+ intermediate_node_auth, ina_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'iam:PassRole',
+ intermediate_node.arn,
+ {'iam:PassedToService': 'datapipeline.amazonaws.com'},
+ service_control_policy_groups=scps
+ )
+ if not intermediate_node_auth:
+ continue # can't use the intermediate to get to the destinations, so we move on
+
+ for destination_node in destination_nodes:
+ if node_source == destination_node:
+ continue
+
+ destination_node_auth, dna_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'iam:PassRole',
+ destination_node.arn,
+ {},
+ service_control_policy_groups=scps
+ )
+ if destination_node_auth:
+ if cpa_mfa or ppda_mfa or ina_mfa or dna_mfa:
+                            reason = f'(requires MFA) can use Data Pipeline with {intermediate_node.searchable_name()} to access'
+ else:
+ reason = f'can use Data Pipeline with {intermediate_node.searchable_name()} to access'
+
+ results.append(Edge(
+ node_source,
+ destination_node,
+ reason,
+ 'Data Pipeline'
+ ))
+
+ return results
diff --git a/principalmapper/graphing/ec2_edges.py b/principalmapper/graphing/ec2_edges.py
index 82f0325..b5145fd 100644
--- a/principalmapper/graphing/ec2_edges.py
+++ b/principalmapper/graphing/ec2_edges.py
@@ -36,11 +36,11 @@ class EC2EdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges."""
logger.info('Generating Edges based on EC2.')
- result = generate_edges_locally(nodes, scps)
+ result = generate_edges_locally(nodes, scps, partition)
for edge in result:
logger.info("Found new edge: {}".format(edge.describe_edge()))
@@ -48,7 +48,7 @@ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]]
return result
-def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] = None) -> List[Edge]:
+def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] = None, partition: str = 'aws') -> List[Edge]:
"""Generates and returns Edge objects. It is possible to use this method if you are operating offline (infra-as-code).
"""
@@ -69,7 +69,20 @@ def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] =
)
if sim_result != ResourcePolicyEvalResult.SERVICE_MATCH:
- continue # EC2 wasn't auth'd to assume the role
+ if partition != 'aws-cn':
+ continue
+ else:
+ # special case: AWS China uses ec2.amazonaws.com.cn as a service principal, so retry
+ sim_result = resource_policy_authorization(
+ 'ec2.amazonaws.com.cn',
+ arns.get_account_id(node_destination.arn),
+ node_destination.trust_policy,
+ 'sts:AssumeRole',
+ node_destination.arn,
+ {},
+ )
+ if sim_result != ResourcePolicyEvalResult.SERVICE_MATCH:
+ continue
for node_source in nodes:
# skip self-access checks
@@ -91,7 +104,19 @@ def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] =
service_control_policy_groups=scps
)
if not pass_role_auth:
- continue # source can't pass the role to use it
+ if partition != 'aws-cn':
+ continue # source can't pass the role to use it
+ else:
+ # try again with AWS China EC2 SP
+ pass_role_auth, mfa_res = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'iam:PassRole',
+ node_destination.arn,
+ {'iam:PassedToService': 'ec2.amazonaws.com.cn'},
+ service_control_policy_groups=scps
+ )
+ if not pass_role_auth:
+ continue
# check if destination has an instance profile, if not: check if source can create it
if node_destination.instance_profile is None:
diff --git a/principalmapper/graphing/ecs_edges.py b/principalmapper/graphing/ecs_edges.py
new file mode 100644
index 0000000..e811545
--- /dev/null
+++ b/principalmapper/graphing/ecs_edges.py
@@ -0,0 +1,134 @@
+"""Code to identify if a principal in an AWS account can use access to ECS to access other principals."""
+
+
+# Copyright (c) NCC Group and Erik Steringer 2022. This file is part of Principal Mapper.
+#
+# Principal Mapper is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Principal Mapper is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Principal Mapper. If not, see <https://www.gnu.org/licenses/>.
+
+import io
+import logging
+import os
+from typing import List, Optional
+
+from principalmapper.common import Edge, Node
+from principalmapper.graphing.edge_checker import EdgeChecker
+from principalmapper.querying import query_interface
+from principalmapper.querying.local_policy_simulation import resource_policy_authorization, ResourcePolicyEvalResult
+from principalmapper.util import arns
+
+
+logger = logging.getLogger(__name__)
+
+
+class ECSEdgeChecker(EdgeChecker):
+ """Class for identifying if ECS can be used by IAM principals to gain access to other IAM principals."""
+
+ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
+ region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
+ """Fulfills expected method return_edges."""
+
+ logger.info('Generating Edges based on ECS.')
+ result = generate_edges_locally(nodes, scps, partition)
+
+ for edge in result:
+ logger.info("Found new edge: {}".format(edge.describe_edge()))
+
+ return result
+
+
+def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] = None, partition: str = 'aws') -> List[Edge]:
+ """Generates and returns Edge objects. It is possible to use this method if you are operating offline (infra-as-code).
+ """
+
+ # TODO: pull and include existing clusters, tasks, services
+
+ result = []
+
+ service_linked_role_exists = False
+ for node in nodes:
+ if ':role/aws-service-role/ecs.amazonaws.com/AWSServiceRoleForECS' in node.arn:
+ service_linked_role_exists = True # can update to point to node if we need to do intermediate checks
+ break
+
+ for node_destination in nodes:
+ if ':role/' not in node_destination.arn:
+ continue
+
+ sim_result = resource_policy_authorization(
+ 'ecs-tasks.amazonaws.com',
+ arns.get_account_id(node_destination.arn),
+ node_destination.trust_policy,
+ 'sts:AssumeRole',
+ node_destination.arn,
+ {}
+ )
+ if sim_result is not ResourcePolicyEvalResult.SERVICE_MATCH:
+ continue
+
+ for node_source in nodes:
+ if node_source == node_destination:
+ continue
+
+ if node_source.is_admin:
+ continue
+
+ # check that either the service-linked role exists or needs to be created
+ create_slr_auth = False
+ create_slr_mfa = False
+ if not service_linked_role_exists:
+ # using auth/mfa var, since the control flow continues to the next loop if we cannot make the SLR
+ create_slr_auth, create_slr_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'iam:CreateServiceLinkedRole',
+                    f'arn:{partition}:iam::{arns.get_account_id(node_source.arn)}:role/aws-service-role/ecs.amazonaws.com/AWSServiceRoleForECS',
+ {'iam:AWSServiceName': 'ecs.amazonaws.com'},
+ service_control_policy_groups=scps
+ )
+ if not create_slr_auth:
+ continue # can't make the service-linked role -> can't use ECS (?)
+
+ # check if someone can pass this role as an ECS Task Role
+ pass_role_auth, pass_role_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'iam:PassRole',
+ node_destination.arn,
+ {'iam:PassedToService': 'ecs-tasks.amazonaws.com'}, # verified via managed policies,
+ service_control_policy_groups=scps
+ )
+
+ if not pass_role_auth:
+ continue
+
+ # check if someone can start/run a task
+ run_task_auth, run_task_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'ecs:RunTask',
+ '*',
+ {},
+ service_control_policy_groups=scps
+ )
+
+ if not run_task_auth:
+ continue
+
+ reason = f'{"(requires MFA) " if create_slr_mfa or pass_role_mfa or run_task_mfa else ""}can ' \
+ f'{"use the existing ECS Service-Linked Role" if service_linked_role_exists else "create the ECS Service-Linked Role"} ' \
+             f'to run a task in ECS and access'
+
+ result.append(Edge(
+ node_source, node_destination, reason, 'ECS'
+ ))
+
+ return result
\ No newline at end of file
diff --git a/principalmapper/graphing/edge_checker.py b/principalmapper/graphing/edge_checker.py
index 5930e43..a69ccef 100644
--- a/principalmapper/graphing/edge_checker.py
+++ b/principalmapper/graphing/edge_checker.py
@@ -33,9 +33,9 @@ def __init__(self, session: botocore.session.Session):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
- """Subclasses shall override this method. Given a list of nodes, the EdgeChecker should be able to use its session
- object in order to make clients and call the AWS API to resolve information about the account. Then,
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
+ """Subclasses shall override this method. Given a list of nodes, the EdgeChecker should be able to use its
+ session object in order to make clients and call the AWS API to resolve information about the account. Then,
with this information, it should return a list of edges between the passed nodes.
The region allow/deny lists are mutually-exclusive (i.e. at least one of which has the value None) lists of
diff --git a/principalmapper/graphing/edge_identification.py b/principalmapper/graphing/edge_identification.py
index 251d45e..382115d 100644
--- a/principalmapper/graphing/edge_identification.py
+++ b/principalmapper/graphing/edge_identification.py
@@ -24,7 +24,10 @@
from principalmapper.graphing.autoscaling_edges import AutoScalingEdgeChecker
from principalmapper.graphing.cloudformation_edges import CloudFormationEdgeChecker
from principalmapper.graphing.codebuild_edges import CodeBuildEdgeChecker
+from principalmapper.graphing.datapipeline_edges import DataPipelineEdgeChecker
from principalmapper.graphing.ec2_edges import EC2EdgeChecker
+from principalmapper.graphing.ecs_edges import ECSEdgeChecker
+from principalmapper.graphing.glue_edges import GlueEdgeChecker
from principalmapper.graphing.iam_edges import IAMEdgeChecker
from principalmapper.graphing.lambda_edges import LambdaEdgeChecker
from principalmapper.graphing.sagemaker_edges import SageMakerEdgeChecker
@@ -40,7 +43,10 @@
'autoscaling': AutoScalingEdgeChecker,
'cloudformation': CloudFormationEdgeChecker,
'codebuild': CodeBuildEdgeChecker,
+ 'datapipeline': DataPipelineEdgeChecker,
'ec2': EC2EdgeChecker,
+ 'ecs': ECSEdgeChecker, # TODO: need to verify ECS work
+ 'glue': GlueEdgeChecker,
'iam': IAMEdgeChecker,
'lambda': LambdaEdgeChecker,
'sagemaker': SageMakerEdgeChecker,
@@ -51,14 +57,17 @@
def obtain_edges(session: Optional[botocore.session.Session], checker_list: List[str], nodes: List[Node],
region_allow_list: Optional[List[str]] = None, region_deny_list: Optional[List[str]] = None,
- scps: Optional[List[List[dict]]] = None, client_args_map: Optional[dict] = None) -> List[Edge]:
+ scps: Optional[List[List[dict]]] = None, client_args_map: Optional[dict] = None,
+ partition: str = 'aws') -> List[Edge]:
"""Given a list of nodes and a botocore Session, return a list of edges between those nodes. Only checks
against services passed in the checker_list param. """
result = []
logger.info('Initiating edge checks.')
- logger.debug('Services being checked for edges: {}'.format(checker_list))
+ logger.debug(f'Services being checked for edges: {checker_list}')
for check in checker_list:
if check in checker_map:
checker_obj = checker_map[check](session)
- result.extend(checker_obj.return_edges(nodes, region_allow_list, region_deny_list, scps, client_args_map))
+ result.extend(
+ checker_obj.return_edges(nodes, region_allow_list, region_deny_list, scps, client_args_map, partition)
+ )
return result
diff --git a/principalmapper/graphing/gathering.py b/principalmapper/graphing/gathering.py
index 9e64d95..6f06903 100644
--- a/principalmapper/graphing/gathering.py
+++ b/principalmapper/graphing/gathering.py
@@ -63,9 +63,10 @@ def create_graph(session: botocore.session.Session, service_list: list, region_a
stsclient = session.create_client('sts', **stsargs)
logger.debug(stsclient.meta.endpoint_url)
caller_identity = stsclient.get_caller_identity()
- logger.debug("Caller Identity: {}".format(caller_identity['Arn']))
+ logger.debug(f"Caller Identity: {caller_identity['Arn']}")
+ current_partition = arns.get_partition(caller_identity['Arn'])
+ current_account = caller_identity['Account']
metadata = {
- 'account_id': caller_identity['Account'],
'pmapper_version': principalmapper.__version__
}
@@ -88,20 +89,21 @@ def create_graph(session: botocore.session.Session, service_list: list, region_a
region_allow_list,
region_deny_list,
scps,
- client_args_map
+ client_args_map,
+ current_partition
)
# Pull S3, SNS, SQS, KMS, and Secrets Manager resource policies
try:
- policies_result.extend(get_s3_bucket_policies(session, client_args_map))
- policies_result.extend(get_sns_topic_policies(session, region_allow_list, region_deny_list, client_args_map))
- policies_result.extend(get_sqs_queue_policies(session, caller_identity['Account'], region_allow_list, region_deny_list, client_args_map))
- policies_result.extend(get_kms_key_policies(session, region_allow_list, region_deny_list, client_args_map))
- policies_result.extend(get_secrets_manager_policies(session, region_allow_list, region_deny_list, client_args_map))
+ policies_result.extend(get_s3_bucket_policies(session, client_args_map, current_partition))
+ policies_result.extend(get_sns_topic_policies(session, region_allow_list, region_deny_list, client_args_map, current_partition))
+ policies_result.extend(get_sqs_queue_policies(session, caller_identity['Account'], region_allow_list, region_deny_list, client_args_map, current_partition))
+ policies_result.extend(get_kms_key_policies(session, region_allow_list, region_deny_list, client_args_map, current_partition))
+ policies_result.extend(get_secrets_manager_policies(session, region_allow_list, region_deny_list, client_args_map, current_partition))
except:
pass
- return Graph(nodes_result, edges_result, policies_result, groups_result, metadata)
+ return Graph(nodes_result, edges_result, policies_result, groups_result, current_account, current_partition, metadata)
def get_nodes_groups_and_policies(iamclient) -> dict:
@@ -282,7 +284,8 @@ def get_nodes_groups_and_policies(iamclient) -> dict:
return result
-def get_s3_bucket_policies(session: botocore.session.Session, client_args_map: Optional[dict] = None) -> List[Policy]:
+def get_s3_bucket_policies(session: botocore.session.Session, client_args_map: Optional[dict] = None,
+ partition: str = 'aws') -> List[Policy]:
"""Using a botocore Session object, return a list of Policy objects representing the bucket policies of each
S3 bucket in this account.
"""
@@ -291,7 +294,7 @@ def get_s3_bucket_policies(session: botocore.session.Session, client_args_map: O
s3client = session.create_client('s3', **s3args)
buckets = [x['Name'] for x in s3client.list_buckets()['Buckets']]
for bucket in buckets:
- bucket_arn = 'arn:aws:s3:::{}'.format(bucket) # TODO: allow different partition
+        bucket_arn = f'arn:{partition}:s3:::{bucket}'
try:
bucket_policy = json.loads(s3client.get_bucket_policy(Bucket=bucket)['Policy'])
result.append(Policy(
@@ -299,12 +302,10 @@ def get_s3_bucket_policies(session: botocore.session.Session, client_args_map: O
bucket,
bucket_policy
))
- logger.info('Caching policy for {}'.format(bucket_arn))
+ logger.info(f'Caching policy for {bucket_arn}')
except botocore.exceptions.ClientError as ex:
if 'NoSuchBucketPolicy' in str(ex):
- logger.info('Bucket {} does not have a bucket policy, adding a "stub" policy instead.'.format(
- bucket
- ))
+ logger.info(f'Bucket {bucket} does not have a bucket policy, adding a "stub" policy instead.')
result.append(Policy(
bucket_arn,
bucket,
@@ -314,14 +315,15 @@ def get_s3_bucket_policies(session: botocore.session.Session, client_args_map: O
}
))
else:
- logger.info('Unable to retrieve bucket policy for {}. You should add this manually. Continuing.'.format(bucket))
- logger.debug('Exception was: {}'.format(ex))
+ logger.info(f'Unable to retrieve bucket policy for {bucket}. You should add this manually. Continuing.')
+ logger.debug(f'Exception was: {ex}')
return result
def get_kms_key_policies(session: botocore.session.Session, region_allow_list: Optional[List[str]] = None,
- region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None) -> List[Policy]:
+ region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None,
+ partition: str = 'aws') -> List[Policy]:
"""Using a botocore Session object, return a list of Policy objects representing the key policies of each
KMS key in this account.
@@ -333,7 +335,7 @@ def get_kms_key_policies(session: botocore.session.Session, region_allow_list: O
kmsargs = client_args_map.get('kms', {})
# Iterate through all regions of KMS where possible
- for kms_region in get_regions_to_search(session, 'kms', region_allow_list, region_deny_list):
+ for kms_region in get_regions_to_search(session, 'kms', region_allow_list, region_deny_list, partition):
try:
# Grab the keys
cmks = []
@@ -352,15 +354,19 @@ def get_kms_key_policies(session: botocore.session.Session, region_allow_list: O
))
logger.info('Caching policy for {}'.format(cmk))
except botocore.exceptions.ClientError as ex:
- logger.info('Unable to search KMS in region {} for key policies. The region may be disabled, or the current principal may not be authorized to access the service. Continuing.'.format(kms_region))
- logger.debug('Exception was: {}'.format(ex))
+ logger.info(
+ f'Unable to search KMS in region {kms_region} for key policies. The region may be disabled, or the '
+ f'current principal may not be authorized to access the service. Continuing.'
+ )
+ logger.debug(f'Exception was: {ex}')
continue
return result
def get_sns_topic_policies(session: botocore.session.Session, region_allow_list: Optional[List[str]] = None,
- region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None) -> List[Policy]:
+ region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None,
+ partition: str = 'aws') -> List[Policy]:
"""Using a botocore Session object, return a list of Policy objects representing the topic policies of each
SNS topic in this account.
@@ -372,7 +378,7 @@ def get_sns_topic_policies(session: botocore.session.Session, region_allow_list:
snsargs = client_args_map.get('sns', {})
# Iterate through all regions of SNS where possible
- for sns_region in get_regions_to_search(session, 'sns', region_allow_list, region_deny_list):
+ for sns_region in get_regions_to_search(session, 'sns', region_allow_list, region_deny_list, partition):
try:
# Grab the topics
topics = []
@@ -391,8 +397,11 @@ def get_sns_topic_policies(session: botocore.session.Session, region_allow_list:
))
logger.info('Caching policy for {}'.format(topic))
except botocore.exceptions.ClientError as ex:
- logger.info('Unable to search SNS in region {} for topic policies. The region may be disabled, or the current principal may not be authorized to access the service. Continuing.'.format(sns_region))
- logger.debug('Exception was: {}'.format(ex))
+ logger.info(
+ f'Unable to search SNS in region {sns_region} for topic policies. The region may be disabled, or '
+ f'the current principal may not be authorized to access the service. Continuing.'
+ )
+ logger.debug(f'Exception was: {ex}')
continue
return result
@@ -400,7 +409,7 @@ def get_sns_topic_policies(session: botocore.session.Session, region_allow_list:
def get_sqs_queue_policies(session: botocore.session.Session, account_id: str,
region_allow_list: Optional[List[str]] = None, region_deny_list: Optional[List[str]] = None,
- client_args_map: Optional[dict] = None) -> List[Policy]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Policy]:
"""Using a botocore Session object, return a list of Policy objects representing the queue policies of each
SQS queue in this account.
@@ -412,7 +421,7 @@ def get_sqs_queue_policies(session: botocore.session.Session, account_id: str,
sqsargs = client_args_map.get('sqs', {})
# Iterate through all regions of SQS where possible
- for sqs_region in get_regions_to_search(session, 'sqs', region_allow_list, region_deny_list):
+ for sqs_region in get_regions_to_search(session, 'sqs', region_allow_list, region_deny_list, partition):
try:
# Grab the queue names
queue_urls = []
@@ -430,30 +439,34 @@ def get_sqs_queue_policies(session: botocore.session.Session, account_id: str,
if 'Policy' in sqs_policy_response:
sqs_policy_doc = json.loads(sqs_policy_response['Policy'])
result.append(Policy(
- 'arn:aws:sqs:{}:{}:{}'.format(sqs_region, account_id, queue_name),
+ f'arn:{partition}:sqs:{sqs_region}:{account_id}:{queue_name}',
queue_name,
json.loads(sqs_policy_doc)
))
- logger.info('Caching policy for {}'.format('arn:aws:sqs:{}:{}:{}'.format(sqs_region, account_id, queue_name)))
+ logger.info(f'Caching policy for {f"arn:{partition}:sqs:{sqs_region}:{account_id}:{queue_name}"}')
else:
result.append(Policy(
- 'arn:aws:sqs:{}:{}:{}'.format(sqs_region, account_id, queue_name),
+ f'arn:{partition}:sqs:{sqs_region}:{account_id}:{queue_name}',
queue_name,
{
"Statement": [],
"Version": "2012-10-17"
}
))
- logger.info('Queue {} does not have a queue policy, adding a "stub" policy instead.'.format(queue_name))
+ logger.info(f'Queue {queue_name} does not have a queue policy, adding a "stub" policy instead.')
except botocore.exceptions.ClientError as ex:
- logger.info('Unable to search SQS in region {} for queues. The region may be disabled, or the current principal may not be authorized to access the service. Continuing.'.format(sqs_region))
- logger.debug('Exception was: {}'.format(ex))
+ logger.info(
+ f'Unable to search SQS in region {sqs_region} for queues. The region may be disabled, or the current '
+ f'principal may not be authorized to access the service. Continuing.'
+ )
+ logger.debug(f'Exception was: {ex}')
return result
def get_secrets_manager_policies(session: botocore.session.Session, region_allow_list: Optional[List[str]] = None,
- region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None) -> List[Policy]:
+ region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None,
+ partition: str = 'aws') -> List[Policy]:
"""Using a botocore Session object, return a list of Policy objects representing the resource policies
of the secrets in AWS Secrets Manager.
@@ -465,7 +478,7 @@ def get_secrets_manager_policies(session: botocore.session.Session, region_allow
smargs = client_args_map.get('secretsmanager', {})
# Iterate through all regions of Secrets Manager where possible
- for sm_region in get_regions_to_search(session, 'secretsmanager', region_allow_list, region_deny_list):
+ for sm_region in get_regions_to_search(session, 'secretsmanager', region_allow_list, region_deny_list, partition):
try:
# Grab the ARNs of the secrets in this region
secret_arns = []
@@ -850,6 +863,7 @@ def get_organizations_data(session: botocore.session.Session) -> OrganizationTre
# grab account data
stsclient = session.create_client('sts')
account_data = stsclient.get_caller_identity()
+ partition = arns.get_partition(account_data['Arn'])
# try to grab org data, raising RuntimeError if appropriate
try:
@@ -857,9 +871,11 @@ def get_organizations_data(session: botocore.session.Session) -> OrganizationTre
organization_data = orgsclient.describe_organization()
except botocore.exceptions.ClientError as ex:
if 'AccessDeniedException' in str(ex):
- raise RuntimeError('Encountered a permission error. Either the current principal ({}) is not authorized to '
- 'interact with AWS Organizations, or the current account ({}) is not the '
- 'management account'.format(account_data['Arn'], account_data['Account']))
+ raise RuntimeError(
+ f'Encountered a permission error. Either the current principal ({account_data["Arn"]}) is not '
+ f'authorized to interact with AWS Organizations, or the current account '
+ f'({account_data["Account"]}) is not the management account'
+ )
else:
raise ex
@@ -875,7 +891,8 @@ def get_organizations_data(session: botocore.session.Session) -> OrganizationTre
None, # get SCPs later
None, # get account list later
[], # caller is responsible for creating and setting the edge list
- {'pmapper_version': principalmapper.__version__}
+ {'pmapper_version': principalmapper.__version__},
+ partition
)
scp_list = []
@@ -904,7 +921,7 @@ def _get_scps_for_target(target_id: str) -> List[Policy]:
desc_policy_resp = orgsclient.describe_policy(PolicyId=policy_arn.split('/')[-1])
scps_result.append(Policy(policy_arn, policy_name, json.loads(desc_policy_resp['Policy']['Content'])))
- logger.debug('SCPs of {}: {}'.format(target_id, [x.arn for x in scps_result]))
+ logger.debug(f'SCPs of {target_id}: {[x.arn for x in scps_result]}')
scp_list.extend(scps_result)
return scps_result
@@ -918,7 +935,7 @@ def _get_tags_for_target(target_id: str) -> dict:
for tag in ltp_page['Tags']:
target_tags[tag['Key']] = tag['Value']
- logger.debug('Tags for {}: {}'.format(target_id, target_tags))
+ logger.debug(f'Tags for {target_id}: {target_tags}')
return target_tags
# for each root, recursively grab child OUs while filling out OrganizationNode/OrganizationAccount objects
@@ -1002,4 +1019,4 @@ def _get_policy_by_arn_or_raise(arn: str, policies: List[Policy]) -> Policy:
for policy in policies:
if arn == policy.arn:
return policy
- raise ValueError('Could not locate policy {}.'.format(arn))
+ raise ValueError(f'Could not locate policy {arn}.')
diff --git a/principalmapper/graphing/glue_edges.py b/principalmapper/graphing/glue_edges.py
new file mode 100644
index 0000000..5bc1b68
--- /dev/null
+++ b/principalmapper/graphing/glue_edges.py
@@ -0,0 +1,186 @@
+"""Code to identify if a principal in an AWS account can use access to AWS Glue to access other principals."""
+
+
+# Copyright (c) NCC Group and Erik Steringer 2019. This file is part of Principal Mapper.
+#
+# Principal Mapper is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Principal Mapper is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Principal Mapper. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+from typing import Dict, List, Optional
+
+from botocore.exceptions import ClientError
+
+from principalmapper.common import Edge, Node
+from principalmapper.graphing.edge_checker import EdgeChecker
+from principalmapper.querying import query_interface
+from principalmapper.querying.local_policy_simulation import resource_policy_authorization, ResourcePolicyEvalResult
+from principalmapper.util import arns, botocore_tools
+
+logger = logging.getLogger(__name__)
+
+
+class GlueEdgeChecker(EdgeChecker):
+ """Class for identifying if Glue can be used by IAM principals to gain access to other IAM principals."""
+
+ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
+ region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
+ """Fulfills expected method return_edges."""
+
+ logger.info('Generating Edges based on Glue.')
+
+ # Gather projects information for each region
+
+ if client_args_map is None:
+ glueargs = {}
+ else:
+ glueargs = client_args_map.get('glue', {})
+
+ glue_clients = []
+ if self.session is not None:
+ cf_regions = botocore_tools.get_regions_to_search(self.session, 'glue', region_allow_list, region_deny_list, partition)
+ for region in cf_regions:
+ glue_clients.append(self.session.create_client('glue', region_name=region, **glueargs))
+
+ endpoint_role_list = []
+ for glue_client in glue_clients:
+ current_region = glue_client.meta.region_name
+ logger.debug(f'Looking at region {current_region}')
+
+ try:
+ # paginate thru existing Glue Dev Endpoints
+ for page in glue_client.get_paginator('get_dev_endpoints').paginate():
+ for endpoint in page['DevEndpoints']:
+ role_node = None
+ if 'RoleArn' in endpoint:
+ for node in nodes:
+ if node.arn == endpoint['RoleArn']:
+ role_node = node
+ break
+
+ if len(nodes) == 0:
+ break # causes false-negatives if there's no users/roles in the account
+ endpoint_arn = f'arn:{partition}:glue:{current_region}:{arns.get_account_id(nodes[0].arn)}:' \
+ f'devEndpoint/{endpoint["EndpointName"]}'
+ endpoint_role_list.append((endpoint_arn, role_node))
+
+ except ClientError as ex:
+                logger.warning('Unable to search region {} for Glue dev endpoints. The region may be disabled, or the '
+                               'error may be caused by an authorization issue. Continuing.'.format(glue_client.meta.region_name))
+ logger.debug('Exception details: {}'.format(ex))
+
+ result = generate_edges_locally(nodes, scps, endpoint_role_list)
+
+ for edge in result:
+ logger.info("Found new edge: {}".format(edge.describe_edge()))
+
+ return result
+
+
+def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] = None,
+ endpoint_role_list: Optional[List[tuple]] = None) -> List[Edge]:
+
+ results = []
+
+ # to make things faster, we build a Role -> Endpoint map to reduce iterations through endpoint_role_list
+ node_endpoint_map = {}
+ if endpoint_role_list is not None:
+ for endpoint_arn, role_node in endpoint_role_list:
+            if role_node is not None and role_node not in node_endpoint_map:
+                node_endpoint_map[role_node] = [endpoint_arn]
+            elif role_node is not None:
+                node_endpoint_map[role_node].append(endpoint_arn)
+
+ # for all potential destination nodes...
+ for node_destination in nodes:
+
+ # filter down to roles...
+ if ':role/' not in node_destination.arn:
+ continue
+
+ # filter down to roles assumable by glue.amazonaws.com
+ sim_result = resource_policy_authorization(
+ 'glue.amazonaws.com',
+ arns.get_account_id(node_destination.arn),
+ node_destination.trust_policy,
+ 'sts:AssumeRole',
+ node_destination.arn,
+ {},
+ )
+ if sim_result != ResourcePolicyEvalResult.SERVICE_MATCH:
+ continue # Glue wasn't auth'd to assume the role
+
+ for node_source in nodes:
+ # skip self-access checks
+ if node_source == node_destination:
+ continue
+
+ # check if source is an admin: if so, it can access destination but this is not tracked via an Edge
+ if node_source.is_admin:
+ continue
+
+ # check if source can use existing endpoints to access destination
+ if node_destination in node_endpoint_map:
+ for target_endpoint in node_endpoint_map[node_destination]:
+ update_ep_auth, update_ep_needs_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'glue:UpdateDevEndpoint',
+ target_endpoint,
+ {},
+ service_control_policy_groups=scps
+ )
+ if update_ep_auth:
+ if update_ep_needs_mfa:
+ reason = f'(requires MFA) can use the Glue resource {target_endpoint} to access'
+ else:
+ reason = f'can use the Glue resource {target_endpoint} to access'
+ results.append(Edge(
+ node_source,
+ node_destination,
+ reason,
+ 'Glue'
+ ))
+ break
+
+ # check if source can create a new endpoint to access destination
+ passrole_auth, passrole_needs_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'iam:PassRole',
+ node_destination.arn,
+ {'iam:PassedToService': 'glue.amazonaws.com'},
+ service_control_policy_groups=scps
+ )
+
+ if passrole_auth:
+ create_ep_auth, create_ep_needs_mfa = query_interface.local_check_authorization_handling_mfa(
+ node_source,
+ 'glue:CreateDevEndpoint',
+ '*',
+ {},
+ service_control_policy_groups=scps
+ )
+
+ if create_ep_auth:
+ if passrole_needs_mfa or create_ep_needs_mfa:
+ reason = '(requires MFA) can call glue:CreateDevEndpoint to access'
+ else:
+ reason = 'can call glue:CreateDevEndpoint to access'
+ results.append(Edge(
+ node_source,
+ node_destination,
+ reason,
+ 'Glue'
+ ))
+
+ return results
diff --git a/principalmapper/graphing/graph_actions.py b/principalmapper/graphing/graph_actions.py
index bb317e8..bc41835 100644
--- a/principalmapper/graphing/graph_actions.py
+++ b/principalmapper/graphing/graph_actions.py
@@ -23,6 +23,7 @@
import botocore.session
from principalmapper.common import Graph
from principalmapper.graphing import gathering
+from principalmapper.util import arns
from principalmapper.util.storage import get_default_graph_path
from typing import List, Optional
@@ -43,18 +44,21 @@ def print_graph_data(graph: Graph) -> None:
"""Given a Graph object, prints a small amount of information about the Graph. This fulfills
`pmapper graph display`, and also gets ran after `pmapper graph --create`.
"""
- print('Graph Data for Account: {}'.format(graph.metadata['account_id']))
+ if graph.partition == 'aws':
+ print(f'Graph Data for Account: {graph.account}')
+ else:
+ print(f'Graph Data for Account: {graph.partition}:{graph.account}')
if 'org-id' in graph.metadata:
- print(' Organization: {}'.format(graph.metadata['org-id']))
- print(' OU Path: {}'.format(graph.metadata['org-path']))
+ print(f' Organization: {graph.metadata["org-id"]}')
+ print(f' OU Path: {graph.metadata["org-path"]}')
admin_count = 0
for node in graph.nodes:
if node.is_admin:
admin_count += 1
- print(' # of Nodes: {} ({} admins)'.format(len(graph.nodes), admin_count))
- print(' # of Edges: {}'.format(len(graph.edges)))
- print(' # of Groups: {}'.format(len(graph.groups)))
- print(' # of (tracked) Policies: {}'.format(len(graph.policies)))
+ print(f' # of Nodes: {len(graph.nodes)} ({admin_count} admins)')
+ print(f' # of Edges: {len(graph.edges)}')
+ print(f' # of Groups: {len(graph.groups)}')
+ print(f' # of (tracked) Policies: {len(graph.policies)}')
def get_graph_from_disk(location: str) -> Graph:
@@ -76,8 +80,15 @@ def get_existing_graph(session: Optional[botocore.session.Session], account: Opt
elif session is not None:
stsclient = session.create_client('sts')
response = stsclient.get_caller_identity()
- logger.debug('Loading graph based on sts:GetCallerIdentity result: {}'.format(response['Account']))
- graph = get_graph_from_disk(os.path.join(get_default_graph_path(response['Account'])))
+ partition = arns.get_partition(response['Arn'])
+ if partition == 'aws':
+ logger.debug('Loading graph based on sts:GetCallerIdentity result: {}'.format(response['Account']))
+ graph = get_graph_from_disk(os.path.join(get_default_graph_path(response['Account'])))
+ else:
+ logger.debug('Loading graph based on sts:GetCallerIdentity result: {}:{}'.format(partition, response['Account']))
+ graph = get_graph_from_disk(
+ get_default_graph_path(f'{partition}:{response["Account"]}')
+ )
else:
raise ValueError('One of the parameters `account` or `session` must not be None')
return graph
diff --git a/principalmapper/graphing/graph_cli.py b/principalmapper/graphing/graph_cli.py
index f31342f..5fb2869 100644
--- a/principalmapper/graphing/graph_cli.py
+++ b/principalmapper/graphing/graph_cli.py
@@ -28,8 +28,8 @@
from principalmapper.graphing.gathering import get_organizations_data
from principalmapper.graphing.edge_identification import checker_map
from principalmapper.querying import query_orgs
-from principalmapper.util import botocore_tools
-from principalmapper.util.storage import get_storage_root
+from principalmapper.util import botocore_tools, arns
+from principalmapper.util.storage import get_storage_root, get_default_graph_path
logger = logging.getLogger(__name__)
@@ -149,9 +149,13 @@ def process_arguments(parsed_args: Namespace):
stsclient = session.create_client('sts')
caller_identity = stsclient.get_caller_identity()
caller_account = caller_identity['Account']
+ partition = arns.get_partition(caller_identity['Arn'])
logger.debug("Caller Identity: {}".format(caller_identity))
- org_tree_search_dir = Path(get_storage_root())
+ if partition == 'aws':
+ org_tree_search_dir = Path(get_storage_root())
+ else:
+ org_tree_search_dir = Path(os.path.join(get_storage_root(), partition))
org_id_pattern = re.compile(r'/o-\w+')
for subdir in org_tree_search_dir.iterdir():
if org_id_pattern.search(str(subdir)) is not None:
@@ -179,7 +183,12 @@ def process_arguments(parsed_args: Namespace):
graph = graph_actions.create_new_graph(session, service_list, parsed_args.include_regions,
parsed_args.exclude_regions, scps, client_args_map)
graph_actions.print_graph_data(graph)
- graph.store_graph_as_json(os.path.join(get_storage_root(), graph.metadata['account_id']))
+ if graph.partition == 'aws':
+ graphid = graph.account
+ else:
+ graphid = f'{graph.partition}:{graph.account}'
+
+ graph.store_graph_as_json(get_default_graph_path(graphid))
elif parsed_args.picked_graph_cmd == 'display':
if parsed_args.account is None:
@@ -206,4 +215,14 @@ def process_arguments(parsed_args: Namespace):
version = account_metadata['pmapper_version']
print("{} (PMapper Version {})".format(direct.name, version))
+ partition_pattern = re.compile(r'aws.*')
+ for direct in storage_root.iterdir():
+ if partition_pattern.search(str(direct)) is not None:
+ for subdirect in direct.iterdir():
+ if account_id_pattern.search(str(subdirect)) is not None:
+ metadata_file = subdirect.joinpath(Path('metadata.json'))
+ with open(str(metadata_file)) as fd:
+ version = json.load(fd)['pmapper_version']
+ print(f'{direct.name}:{subdirect.name} (PMapper Version {version})')
+
return 0
diff --git a/principalmapper/graphing/iam_edges.py b/principalmapper/graphing/iam_edges.py
index 6caa3a7..7ccd774 100644
--- a/principalmapper/graphing/iam_edges.py
+++ b/principalmapper/graphing/iam_edges.py
@@ -34,7 +34,7 @@ class IAMEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges."""
logger.info('Generating Edges based on IAM')
diff --git a/principalmapper/graphing/lambda_edges.py b/principalmapper/graphing/lambda_edges.py
index 7f6c830..c4531b7 100644
--- a/principalmapper/graphing/lambda_edges.py
+++ b/principalmapper/graphing/lambda_edges.py
@@ -37,7 +37,7 @@ class LambdaEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges. If session object is None, runs checks in offline mode."""
logger.info('Pulling data on Lambda functions')
@@ -49,7 +49,7 @@ def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]]
lambda_clients = []
if self.session is not None:
- lambda_regions = botocore_tools.get_regions_to_search(self.session, 'lambda', region_allow_list, region_deny_list)
+ lambda_regions = botocore_tools.get_regions_to_search(self.session, 'lambda', region_allow_list, region_deny_list, partition)
for region in lambda_regions:
lambda_clients.append(self.session.create_client('lambda', region_name=region, **lambdaargs))
diff --git a/principalmapper/graphing/orgs_cli.py b/principalmapper/graphing/orgs_cli.py
index 32ad3f5..b55cd44 100644
--- a/principalmapper/graphing/orgs_cli.py
+++ b/principalmapper/graphing/orgs_cli.py
@@ -29,7 +29,7 @@
from principalmapper.graphing.gathering import get_organizations_data
from principalmapper.querying.query_orgs import produce_scp_list
from principalmapper.util import botocore_tools
-from principalmapper.util.storage import get_storage_root
+from principalmapper.util.storage import get_storage_root, get_default_graph_path
logger = logging.getLogger(__name__)
@@ -106,7 +106,8 @@ def process_arguments(parsed_args: Namespace):
# create the account -> OU path map and apply to all accounts (same as orgs update operation)
account_ou_map = _map_account_ou_paths(org_tree)
logger.debug('account_ou_map: {}'.format(account_ou_map))
- _update_accounts_with_ou_path_map(org_tree.org_id, account_ou_map, get_storage_root())
+ root_path = get_storage_root() if org_tree.partition == 'aws' else os.path.join(get_storage_root(), org_tree.partition)
+ _update_accounts_with_ou_path_map(org_tree.org_id, account_ou_map, root_path)
logger.info('Updated currently stored Graphs with applicable AWS Organizations data')
# create and cache a list of edges between all the accounts we have data for
@@ -114,7 +115,11 @@ def process_arguments(parsed_args: Namespace):
graph_objs = []
for account in org_tree.accounts:
try:
- potential_path = os.path.join(get_storage_root(), account)
+ if org_tree.partition != 'aws':
+ potential_path = get_default_graph_path(f'{org_tree.partition}:{account}')
+ else:
+ potential_path = get_default_graph_path(account)
+
logger.debug('Trying to load a Graph from {}'.format(potential_path))
graph_obj = Graph.create_graph_from_local_disk(potential_path)
graph_objs.append(graph_obj)
@@ -135,18 +140,23 @@ def process_arguments(parsed_args: Namespace):
org_tree.edge_list = edge_list
logger.info('Compiled cross-account edges')
- org_tree.save_organization_to_disk(os.path.join(get_storage_root(), org_tree.org_id))
+ if org_tree.partition != 'aws':
+ org_storage_path = get_default_graph_path(f'{org_tree.partition}:{org_tree.org_id}')
+ else:
+ org_storage_path = get_default_graph_path(org_tree.org_id)
+ org_tree.save_organization_to_disk(org_storage_path)
logger.info('Stored organization data to disk')
elif parsed_args.picked_orgs_cmd == 'update':
# pull the existing data from disk
- org_filepath = os.path.join(get_storage_root(), parsed_args.org)
+ org_filepath = get_default_graph_path(parsed_args.org)
org_tree = OrganizationTree.create_from_dir(org_filepath)
# create the account -> OU path map and apply to all accounts
account_ou_map = _map_account_ou_paths(org_tree)
logger.debug('account_ou_map: {}'.format(account_ou_map))
- _update_accounts_with_ou_path_map(org_tree.org_id, account_ou_map, get_storage_root())
+ root_path = get_storage_root() if org_tree.partition == 'aws' else os.path.join(get_storage_root(), org_tree.partition)
+ _update_accounts_with_ou_path_map(org_tree.org_id, account_ou_map, root_path)
logger.info('Updated currently stored Graphs with applicable AWS Organizations data')
# create and cache a list of edges between all the accounts we have data for
@@ -154,7 +164,10 @@ def process_arguments(parsed_args: Namespace):
graph_objs = []
for account in org_tree.accounts:
try:
- potential_path = os.path.join(get_storage_root(), account)
+ if org_tree.partition != 'aws':
+ potential_path = get_default_graph_path(f'{org_tree.partition}:{account}')
+ else:
+ potential_path = get_default_graph_path(account)
logger.debug('Trying to load a Graph from {}'.format(potential_path))
graph_obj = Graph.create_graph_from_local_disk(potential_path)
graph_objs.append(graph_obj)
@@ -175,12 +188,16 @@ def process_arguments(parsed_args: Namespace):
org_tree.edge_list = edge_list
logger.info('Compiled cross-account edges')
- org_tree.save_organization_to_disk(os.path.join(get_storage_root(), org_tree.org_id))
+ if org_tree.partition != 'aws':
+ org_storage_path = get_default_graph_path(f'{org_tree.partition}:{org_tree.org_id}')
+ else:
+ org_storage_path = get_default_graph_path(org_tree.org_id)
+ org_tree.save_organization_to_disk(org_storage_path)
logger.info('Stored organization data to disk')
elif parsed_args.picked_orgs_cmd == 'display':
# pull the existing data from disk
- org_filepath = os.path.join(get_storage_root(), parsed_args.org)
+ org_filepath = get_default_graph_path(parsed_args.org)
org_tree = OrganizationTree.create_from_dir(org_filepath)
def _print_account(org_account: OrganizationAccount, indent_level: int, inherited_scps: List[Policy]):
@@ -209,13 +226,23 @@ def _walk_and_print_ou(org_node: OrganizationNode, indent_level: int, inherited_
print("Organization IDs:")
print("---")
storage_root = Path(get_storage_root())
- account_id_pattern = re.compile(r'o-\w+')
+ org_id_pattern = re.compile(r'o-\w+')
for direct in storage_root.iterdir():
- if account_id_pattern.search(str(direct)) is not None:
+ if org_id_pattern.search(str(direct)) is not None:
metadata_file = direct.joinpath(Path('metadata.json'))
with open(str(metadata_file)) as fd:
version = json.load(fd)['pmapper_version']
- print("{} (PMapper Version {})".format(direct.name, version))
+ print(f"{direct.name} (PMapper Version {version})")
+
+ partition_pattern = re.compile(r'aws.*')
+ for direct in storage_root.iterdir():
+ if partition_pattern.search(str(direct)) is not None:
+ for subdirect in direct.iterdir():
+ if org_id_pattern.search(str(subdirect)) is not None:
+ metadata_file = subdirect.joinpath(Path('metadata.json'))
+ with open(str(metadata_file)) as fd:
+ version = json.load(fd)['pmapper_version']
+ print(f'{direct.name}:{subdirect.name} (PMapper Version {version})')
return 0
diff --git a/principalmapper/graphing/sagemaker_edges.py b/principalmapper/graphing/sagemaker_edges.py
index 30f929f..7cf70a0 100644
--- a/principalmapper/graphing/sagemaker_edges.py
+++ b/principalmapper/graphing/sagemaker_edges.py
@@ -36,7 +36,7 @@ class SageMakerEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""fulfills expected method"""
logger.info('Generating Edges based on SageMaker')
diff --git a/principalmapper/graphing/ssm_edges.py b/principalmapper/graphing/ssm_edges.py
index 0b59d89..1a26572 100644
--- a/principalmapper/graphing/ssm_edges.py
+++ b/principalmapper/graphing/ssm_edges.py
@@ -35,7 +35,7 @@ class SSMEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges. If session object is None, runs checks in offline mode."""
logger.info('Generating Edges based on SSM')
@@ -96,7 +96,7 @@ def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] =
if cmd_auth_res:
reason = 'can call ssm:SendCommand to access an EC2 instance with access to'
if mfa_res_1:
- reason = '(Requires MFA) ' + reason
+ reason = '(requires MFA) ' + reason
result.append(Edge(node_source, node_destination, reason, 'SSM'))
sesh_auth_res, mfa_res_2 = query_interface.local_check_authorization_handling_mfa(
@@ -109,7 +109,7 @@ def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] =
if sesh_auth_res:
reason = 'can call ssm:StartSession to access an EC2 instance with access to'
if mfa_res_2:
- reason = '(Requires MFA) ' + reason
+ reason = '(requires MFA) ' + reason
result.append(Edge(node_source, node_destination, reason, 'SSM'))
return result
diff --git a/principalmapper/graphing/sts_edges.py b/principalmapper/graphing/sts_edges.py
index 34d3bc9..40b72c1 100644
--- a/principalmapper/graphing/sts_edges.py
+++ b/principalmapper/graphing/sts_edges.py
@@ -25,6 +25,7 @@
from principalmapper.querying import query_interface
from principalmapper.querying.local_policy_simulation import resource_policy_authorization, ResourcePolicyEvalResult, has_matching_statement
from principalmapper.util import arns
+from principalmapper.util.case_insensitive_dict import CaseInsensitiveDict
logger = logging.getLogger(__name__)
@@ -35,7 +36,7 @@ class STSEdgeChecker(EdgeChecker):
def return_edges(self, nodes: List[Node], region_allow_list: Optional[List[str]] = None,
region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None,
- client_args_map: Optional[dict] = None) -> List[Edge]:
+ client_args_map: Optional[dict] = None, partition: str = 'aws') -> List[Edge]:
"""Fulfills expected method return_edges. If the session object is None, performs checks in offline-mode"""
result = generate_edges_locally(nodes, scps)
@@ -66,6 +67,7 @@ def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] =
continue
# Check against resource policy
+ rp_mfa_required = False
sim_result = resource_policy_authorization(
node_source,
arns.get_account_id(node_source.arn),
@@ -75,11 +77,22 @@ def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] =
{},
)
- if sim_result == ResourcePolicyEvalResult.DENY_MATCH:
- continue # Node was explicitly denied from assuming the role
-
- if sim_result == ResourcePolicyEvalResult.NO_MATCH:
- continue # Resource policy must match for sts:AssumeRole, even in same-account scenarios
+ if sim_result == ResourcePolicyEvalResult.DENY_MATCH or sim_result == ResourcePolicyEvalResult.NO_MATCH:
+ sim_result = resource_policy_authorization(
+ node_source,
+ arns.get_account_id(node_source.arn),
+ node_destination.trust_policy,
+ 'sts:AssumeRole',
+ node_destination.arn,
+ {
+ 'aws:MultiFactorAuthAge': '1',
+ 'aws:MultiFactorAuthPresent': 'true'
+ }
+ )
+ if sim_result == ResourcePolicyEvalResult.DENY_MATCH or sim_result == ResourcePolicyEvalResult.NO_MATCH:
+ continue
+ else:
+ rp_mfa_required = True # Resource Policy auth check passed when MFA elements set
assume_auth, need_mfa = query_interface.local_check_authorization_handling_mfa(
node_source, 'sts:AssumeRole', node_destination.arn, {}, service_control_policy_groups=scps
@@ -89,21 +102,21 @@ def generate_edges_locally(nodes: List[Node], scps: Optional[List[List[dict]]] =
'Deny',
'sts:AssumeRole',
node_destination.arn,
- {},
+ CaseInsensitiveDict({}),
)
policy_denies_mfa = has_matching_statement(
node_source,
'Deny',
'sts:AssumeRole',
node_destination.arn,
- {
+ CaseInsensitiveDict({
'aws:MultiFactorAuthAge': '1',
'aws:MultiFactorAuthPresent': 'true'
- },
+ }),
)
if assume_auth:
- if need_mfa:
+ if need_mfa or rp_mfa_required:
reason = '(requires MFA) can access via sts:AssumeRole'
else:
reason = 'can access via sts:AssumeRole'
diff --git a/principalmapper/querying/argquery_cli.py b/principalmapper/querying/argquery_cli.py
index d073db4..e82518a 100644
--- a/principalmapper/querying/argquery_cli.py
+++ b/principalmapper/querying/argquery_cli.py
@@ -103,7 +103,7 @@ def process_arguments(parsed_args: Namespace):
else:
session = None
graph = graph_actions.get_existing_graph(session, parsed_args.account)
- logger.debug('Querying against graph {}'.format(graph.metadata['account_id']))
+ logger.debug('Querying against graph {}'.format(graph.account))
# process condition args to generate input dict
conditions = {}
diff --git a/principalmapper/querying/presets/endgame.py b/principalmapper/querying/presets/endgame.py
index 02840f6..72c1117 100644
--- a/principalmapper/querying/presets/endgame.py
+++ b/principalmapper/querying/presets/endgame.py
@@ -24,23 +24,23 @@
_service_resource_exposure_map = {
's3': {
- 'pattern': re.compile(r"^arn:aws:s3:::[^/]+$"),
+ 'pattern': re.compile(r"^arn:\S+:s3:::[^/]+$"),
'actions': ['s3:PutBucketPolicy']
},
'sns': {
- 'pattern': re.compile(r"^arn:aws:sns:[a-z0-9-]+:[0-9]+:.*"),
+ 'pattern': re.compile(r"^arn:\S+:sns:[a-z0-9-]+:[0-9]+:.*"),
'actions': ['sns:AddPermission', 'sns:SetTopicAttributes']
},
'sqs': {
- 'pattern': re.compile(r"^arn:aws:sqs:[a-z0-9-]+:[0-9]+:.*"),
+ 'pattern': re.compile(r"^arn:\S+:sqs:[a-z0-9-]+:[0-9]+:.*"),
'actions': ['sqs:AddPermission', 'sqs:SetQueueAttributes']
},
'kms': {
- 'pattern': re.compile(r"^arn:aws:kms:[a-z0-9-]+:[0-9]+:key/.*"),
+ 'pattern': re.compile(r"^arn:\S+:kms:[a-z0-9-]+:[0-9]+:key/.*"),
'actions': ['kms:PutKeyPolicy']
},
'secretsmanager': {
- 'pattern': re.compile(r"^arn:aws:secretsmanager:[a-z0-9-]+:[0-9]+:.*"),
+ 'pattern': re.compile(r"^arn:\S+:secretsmanager:[a-z0-9-]+:[0-9]+:.*"),
'actions': ['secretsmanager:PutResourcePolicy']
}
}
@@ -87,7 +87,7 @@ def compose_endgame_map(graph: Graph, service_to_include: str = '*', skip_admins
continue
query_result = query_interface.local_check_authorization_full(
- node, action, policy.arn, node.cache['conditions'], policy.policy_doc, graph.metadata['account_id'],
+ node, action, policy.arn, node.cache['conditions'], policy.policy_doc, graph.account,
None, None
)
@@ -102,7 +102,7 @@ def compose_endgame_map(graph: Graph, service_to_include: str = '*', skip_admins
})
query_result = query_interface.local_check_authorization_full(
node, action, policy.arn, conditions_copy, policy.policy_doc,
- graph.metadata['account_id'],
+ graph.account,
None, None
)
if query_result:
diff --git a/principalmapper/querying/presets/wrongadmin.py b/principalmapper/querying/presets/wrongadmin.py
index 3bb3edf..6c23e1c 100644
--- a/principalmapper/querying/presets/wrongadmin.py
+++ b/principalmapper/querying/presets/wrongadmin.py
@@ -87,6 +87,7 @@ def _get_admin_reason(node: Node) -> List[str]:
result = []
logger.debug("Checking if {} is an admin".format(node.searchable_name()))
node_type = arns.get_resource(node.arn).split('/')[0]
+ partition = arns.get_partition(node.arn)
# check if node can modify its own inline policies
if node_type == 'user':
@@ -101,7 +102,7 @@ def _get_admin_reason(node: Node) -> List[str]:
action = 'iam:AttachUserPolicy'
else:
action = 'iam:AttachRolePolicy'
- condition_keys = {'iam:PolicyARN': 'arn:aws:iam::aws:policy/AdministratorAccess'}
+ condition_keys = {'iam:PolicyARN': f'arn:{partition}:iam::aws:policy/AdministratorAccess'}
if query_interface.local_check_authorization_handling_mfa(node, action, node.arn, condition_keys)[0]:
result.append('Can call {} to attach the AdministratorAccess policy to itself'.format(action))
diff --git a/principalmapper/querying/query_cli.py b/principalmapper/querying/query_cli.py
index ab3e057..91e747a 100644
--- a/principalmapper/querying/query_cli.py
+++ b/principalmapper/querying/query_cli.py
@@ -85,7 +85,7 @@ def process_arguments(parsed_args: Namespace):
session = None
graph = graph_actions.get_existing_graph(session, parsed_args.account)
- logger.debug('Querying against graph {}'.format(graph.metadata['account_id']))
+ logger.debug('Querying against graph {}'.format(graph.account))
if parsed_args.with_resource_policy:
resource_policy = query_utils.pull_cached_resource_policy_by_arn(
diff --git a/principalmapper/querying/query_interface.py b/principalmapper/querying/query_interface.py
index 0de95c9..d742487 100644
--- a/principalmapper/querying/query_interface.py
+++ b/principalmapper/querying/query_interface.py
@@ -113,7 +113,7 @@ def search_authorization_across_accounts(graph_scp_pairs: List[Tuple[Graph, Opti
account_id_graph_scp_pair_map = {}
for graph_scp_pair in graph_scp_pairs:
- account_id_graph_scp_pair_map[graph_scp_pair[0].metadata['account_id']] = graph_scp_pair
+ account_id_graph_scp_pair_map[graph_scp_pair[0].account] = graph_scp_pair
source_graph_scp_pair = account_id_graph_scp_pair_map[arns.get_account_id(principal.arn)]
if local_check_authorization_full(principal, action_to_check, resource_to_check, condition_keys_to_check,
@@ -288,7 +288,7 @@ def local_check_authorization_full(principal: Node, action_to_check: str, resour
prepped_condition_keys = _prepare_condition_context(conditions_keys_copy)
prepped_condition_keys.update(_infer_condition_keys(principal, prepped_condition_keys))
- is_not_service_linked_role = not _check_if_service_linked_role(principal)
+ is_not_service_linked_role = not principal.is_service_linked_role()
logger.debug(
'Testing authorization for: principal: {}, action: {}, resource: {}, conditions: {}, Resource Policy: {}, SCPs: {}, Session Policy: {}'.format(
@@ -402,17 +402,6 @@ def local_check_authorization_full(principal: Node, action_to_check: str, resour
return False
-def _check_if_service_linked_role(principal: Node) -> bool:
- """Given a Node, determine if it should be treated as a service-linked role. This affects SCP policy decisions as
- described in
- https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scps.html#not-restricted-by-scp"""
-
- if ':role/' in principal.arn:
- role_name = principal.arn.split('/')[-1]
- return role_name.startswith('AWSServiceRoleFor')
- return False
-
-
def simulation_api_check_authorization(iamclient, principal: Node, action_to_check: str, resource_to_check: str,
condition_keys_to_check: dict) -> bool:
"""DO NOT USE THIS FUNCTION, IT WILL ONLY THROW A NotImplementedError."""
diff --git a/principalmapper/querying/query_orgs.py b/principalmapper/querying/query_orgs.py
index 7b62bf2..abf4e15 100644
--- a/principalmapper/querying/query_orgs.py
+++ b/principalmapper/querying/query_orgs.py
@@ -90,10 +90,9 @@ def produce_scp_list(graph: Graph, org: OrganizationTree) -> Optional[List[List[
want in that case."""
if 'org-id' not in graph.metadata or 'org-path' not in graph.metadata:
- raise ValueError('Given graph for account {} does not have AWS Organizations data (try running '
- '`pmapper orgs create/update`).')
+ raise ValueError(f'Given Graph for account {graph.account} does not have AWS Organizations data')
- if graph.metadata['account_id'] == org.management_account_id:
+ if graph.account == org.management_account_id:
return None
result = []
@@ -101,6 +100,6 @@ def produce_scp_list(graph: Graph, org: OrganizationTree) -> Optional[List[List[
# org-path is in the form '//[///]' so we split and start from [1]
org_path_parts = graph.metadata['org-path'].split('/')
- _grab_policies_and_traverse(org.root_ous, org_path_parts, 1, graph.metadata['account_id'], result)
+ _grab_policies_and_traverse(org.root_ous, org_path_parts, 1, graph.account, result)
return result
diff --git a/principalmapper/querying/query_utils.py b/principalmapper/querying/query_utils.py
index e625566..8f412ff 100644
--- a/principalmapper/querying/query_utils.py
+++ b/principalmapper/querying/query_utils.py
@@ -36,10 +36,11 @@ def get_search_list(graph: Graph, node: Node) -> List[List[Edge]]:
result = []
explored_nodes = []
- # Special-case: node is an "admin", so we make up admin edges and return them all
+ # Special-case: node is an "admin", so we make up admin edges and return them all. BUT, if the destination
+ # node is the original node or a service-linked role, then we skip those
if node.is_admin:
for other_node in graph.nodes:
- if node == other_node:
+ if node == other_node or other_node.is_service_linked_role():
continue
result.append([Edge(node, other_node, 'can access through administrative actions', 'Admin')])
return result
@@ -158,7 +159,7 @@ def pull_resource_policy_by_arn(session: botocore.session.Session, arn: Optional
elif service == 's3':
# arn:aws:s3:::/
client = session.create_client('s3')
- bucket_name = arns.get_resource(arn).split('arn:aws:s3:::')[-1].split('/')[0]
+ bucket_name = arns.get_resource(arn).split(':s3:::')[-1].split('/')[0]
logger.debug('Calling S3 API to retrieve bucket policy of {}'.format(bucket_name))
bucket_policy = json.loads(client.get_bucket_policy(Bucket=bucket_name)['Policy'])
return bucket_policy
@@ -199,7 +200,7 @@ def get_interaccount_search_list(all_graphs: List[Graph], inter_account_edges: L
account_id_graph_map = {}
for graph in all_graphs:
- account_id_graph_map[graph.metadata['account_id']] = graph
+ account_id_graph_map[graph.account] = graph
# Get initial list of edges
first_set = get_edges_interaccount(account_id_graph_map[arns.get_account_id(node.arn)], inter_account_edges, node, nodes_found)
@@ -225,9 +226,8 @@ def get_interaccount_search_list(all_graphs: List[Graph], inter_account_edges: L
def get_edges_interaccount(source_graph: Graph, inter_account_edges: List[Edge], node: Node, ignored_nodes: List[Node]) -> List[Edge]:
"""Given a Node, the Graph it belongs to, a list of inter-account Edges, and a list of Nodes to skip, this returns
- any Edges where the Node is the source element as long as the destination element isn't included in the skipped Nodes.
-
- If the given node is an admin, those Edge objects get generated and returned.
+ any Edges where the Node is the source element as long as the destination element isn't included in the skipped
+ Nodes.
"""
result = []
@@ -241,3 +241,14 @@ def get_edges_interaccount(source_graph: Graph, inter_account_edges: List[Edge],
result.append(inter_account_edge)
return result
+
+
+def check_if_service_linked_role(principal: Node) -> bool:
+ """Given a Node, determine if it should be treated as a service-linked role. This affects SCP policy decisions as
+ described in
+ https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scps.html#not-restricted-by-scp"""
+
+ if ':role/' in principal.arn:
+ role_name = principal.arn.split('/')[-1]
+ return role_name.startswith('AWSServiceRoleFor')
+ return False
diff --git a/principalmapper/util/botocore_tools.py b/principalmapper/util/botocore_tools.py
index 4712e6e..a325891 100644
--- a/principalmapper/util/botocore_tools.py
+++ b/principalmapper/util/botocore_tools.py
@@ -46,7 +46,9 @@ def get_session(profile_arg: Optional[str], stsargs: Optional[dict] = None) -> b
return result
-def get_regions_to_search(session: botocore.session.Session, service_name: str, region_allow_list: Optional[List[str]] = None, region_deny_list: Optional[List[str]] = None) -> List[str]:
+def get_regions_to_search(session: botocore.session.Session, service_name: str,
+ region_allow_list: Optional[List[str]] = None, region_deny_list: Optional[List[str]] = None,
+ partition: str = 'aws') -> List[str]:
"""Using a botocore Session object, the name of a service, and either an allow-list or a deny-list (but not both),
return a list of regions to be used during the gathering process. This uses the botocore Session object's
get_available_regions method as the base list.
@@ -58,12 +60,14 @@ def get_regions_to_search(session: botocore.session.Session, service_name: str,
thrown if a region is specified inthe deny-list but not included in the base list.
A ValueError is thrown if the allow-list AND deny-list are both not None.
+
+ * **v1.2.0:** Added partition support (default to 'aws')
"""
if region_allow_list is not None and region_deny_list is not None:
raise ValueError('This function allows only either the allow-list or the deny-list, but NOT both.')
- base_list = session.get_available_regions(service_name)
+ base_list = session.get_available_regions(service_name, partition)
result = []
@@ -78,6 +82,6 @@ def get_regions_to_search(session: botocore.session.Session, service_name: str,
else:
result = base_list
- logger.debug('Final list of regions for {}: {}'.format(service_name, result))
+ logger.debug(f'Final list of regions for {service_name}: {result}')
return result
diff --git a/principalmapper/util/case_insensitive_dict.py b/principalmapper/util/case_insensitive_dict.py
index ff5ee90..695c036 100644
--- a/principalmapper/util/case_insensitive_dict.py
+++ b/principalmapper/util/case_insensitive_dict.py
@@ -31,7 +31,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from collections import Mapping, MutableMapping, OrderedDict
+from collections import OrderedDict
+try:
+ from collections import Mapping, MutableMapping
+except ImportError:
+ from collections.abc import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
diff --git a/principalmapper/util/storage.py b/principalmapper/util/storage.py
index 695371e..ef82c70 100644
--- a/principalmapper/util/storage.py
+++ b/principalmapper/util/storage.py
@@ -56,5 +56,16 @@ def get_storage_root():
def get_default_graph_path(account_or_org: str):
- """Returns a path to a given account or organization by the provided string."""
- return os.path.join(get_storage_root(), account_or_org)
+ """Returns a path to a given account or organization by the provided string.
+
+ * **v1.2.0**: Added partition support, expected format is :. If there's no partition,
+ default is 'aws'. If partition is not 'aws', then we add that to the directories we hop through.
+ """
+ if ':' in account_or_org:
+ argparts = account_or_org.split(':')
+ basedir = os.path.join(get_storage_root(), argparts[0])
+ acctid = argparts[1]
+ else:
+ basedir = get_storage_root()
+ acctid = account_or_org
+ return os.path.join(basedir, acctid)
diff --git a/principalmapper/visualizing/cli.py b/principalmapper/visualizing/cli.py
index f7ae9f7..8bb0ec0 100644
--- a/principalmapper/visualizing/cli.py
+++ b/principalmapper/visualizing/cli.py
@@ -55,11 +55,11 @@ def process_arguments(parsed_args: Namespace):
graph = graph_actions.get_existing_graph(session, parsed_args.account)
if parsed_args.only_privesc:
- filepath = './{}-privesc-risks.{}'.format(graph.metadata['account_id'], parsed_args.filetype)
+ filepath = './{}-privesc-risks.{}'.format(graph.account, parsed_args.filetype)
graph_writer.draw_privesc_paths(graph, filepath, parsed_args.filetype)
else:
# create file
- filepath = './{}.{}'.format(graph.metadata['account_id'], parsed_args.filetype)
+ filepath = './{}.{}'.format(graph.account, parsed_args.filetype)
graph_writer.handle_request(graph, filepath, parsed_args.filetype, parsed_args.with_services)
print('Created file {}'.format(filepath))
diff --git a/principalmapper/visualizing/graphviz_writer.py b/principalmapper/visualizing/graphviz_writer.py
index c858da4..a270fe8 100644
--- a/principalmapper/visualizing/graphviz_writer.py
+++ b/principalmapper/visualizing/graphviz_writer.py
@@ -29,7 +29,7 @@ def write_standard_graphviz(graph: Graph, filepath: str, file_format: str, with_
# Load graph data into pydot
pydg = pydot.Dot(
graph_type='digraph',
- graph_name='Principal Mapper Visualization: {}'.format(graph.metadata['account_id']),
+ graph_name='Principal Mapper Visualization: {}'.format(graph.account),
overlap='scale',
layout='neato',
concentrate='true',
diff --git a/requirements.txt b/requirements.txt
index ce3865e..fba42ab 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
botocore > 1.13
-packaging
+packaging >= 21.0
python-dateutil
pydot
\ No newline at end of file
diff --git a/setup.py b/setup.py
index ea072fa..87d73c0 100644
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
include_package_data=True,
packages=find_packages(exclude=("tests", )),
package_data={},
- python_requires='>=3.5, <4', # assume Python 4 will break
+ python_requires='>=3.6, <4', # assume Python 4 will break
install_requires=['botocore', 'packaging', 'python-dateutil', 'pydot'],
entry_points={
'console_scripts': [
@@ -57,6 +57,7 @@
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
'Topic :: Security'
],
keywords=[
diff --git a/tests/build_test_graphs.py b/tests/build_test_graphs.py
index 82c111b..dba7f86 100644
--- a/tests/build_test_graphs.py
+++ b/tests/build_test_graphs.py
@@ -24,7 +24,7 @@
def build_empty_graph() -> Graph:
"""Constructs and returns a Graph object with no nodes, edges, policies, or groups"""
- return Graph([], [], [], [], _get_default_metadata())
+ return Graph([], [], [], [], '000000000000', 'aws', _get_default_metadata())
def build_graph_with_one_admin() -> Graph:
@@ -32,7 +32,7 @@ def build_graph_with_one_admin() -> Graph:
admin_user_arn = 'arn:aws:iam::000000000000:user/admin'
policy = Policy(admin_user_arn, 'InlineAdminPolicy', _get_admin_policy())
node = Node(admin_user_arn, 'AIDA00000000000000000', [policy], [], None, None, 1, True, True, None, False, None)
- return Graph([node], [], [policy], [], _get_default_metadata())
+ return Graph([node], [], [policy], [], '000000000000', 'aws', _get_default_metadata())
# noinspection PyListCreation
@@ -72,6 +72,24 @@ def build_playground_graph() -> Graph:
nodes.append(Node(common_iam_prefix + 'role/s3_access_role', 'AIDA00000000000000003', [s3_full_access_policy], [], root_trusted_policy_doc,
None, 0, False, False, None, False, None))
+ # assumable role with s3 access and MFA required to assume
+ nodes.append(Node(common_iam_prefix + 'role/mfa_role_with_s3_access', 'AIDA0000000000000099', [s3_full_access_policy], [],
+ {
+ 'Version': '2012-10-17',
+ 'Statement': [
+ {
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': 'arn:aws:iam::000000000000:root'},
+ 'Action': 'sts:AssumeRole',
+ 'Condition': {
+ 'Bool': {
+ 'aws:MultiFactorAuthPresent': 'true'
+ }
+ }
+ }
+ ]
+ }, None, 0, False, False, None, False, None))
+
# second assumable role with s3 access with alternative trust policy
nodes.append(Node(common_iam_prefix + 'role/s3_access_role_alt', 'AIDA00000000000000004', [s3_full_access_policy], [],
alt_root_trusted_policy_doc, None, 0, False, False, None, False, None))
@@ -81,7 +99,7 @@ def build_playground_graph() -> Graph:
other_acct_trusted_policy_doc, None, 0, False, False, None, False, None))
# jump user with access to sts:AssumeRole
- nodes.append(Node(common_iam_prefix + 'user/jumpuser', 'AIDA00000000000000006', [jump_policy], [], None, None, 1, True, False, None, False, None))
+ nodes.append(Node(common_iam_prefix + 'user/jumpuser', 'AIDA00000000000000006', [jump_policy], [], None, None, 1, True, False, None, True, None))
# user with S3 access, path in user's ARN
nodes.append(Node(common_iam_prefix + 'user/somepath/some_other_jumpuser', 'AIDA00000000000000007', [jump_policy],
@@ -94,7 +112,7 @@ def build_playground_graph() -> Graph:
# edges to add
edges = obtain_edges(None, checker_map.keys(), nodes)
- return Graph(nodes, edges, policies, [], _get_default_metadata())
+ return Graph(nodes, edges, policies, [], '000000000000', 'aws', _get_default_metadata())
def _get_admin_policy() -> dict:
diff --git a/tests/test_constructors.py b/tests/test_constructors.py
index f508403..84240be 100644
--- a/tests/test_constructors.py
+++ b/tests/test_constructors.py
@@ -26,13 +26,13 @@
class ConstructorTest(unittest.TestCase):
def test_graphs(self):
with self.assertRaises(ValueError):
- Graph(nodes=None, edges=[], policies=[], groups=[])
+ Graph(nodes=None, edges=[], policies=[], groups=[], account='000000000000', partition='aws', metadata={})
with self.assertRaises(ValueError):
- Graph(nodes=[], edges=None, policies=[], groups=[])
+ Graph(nodes=[], edges=None, policies=[], groups=[], account='000000000000', partition='aws', metadata={})
with self.assertRaises(ValueError):
- Graph(nodes=[], edges=[], policies=None, groups=[])
+ Graph(nodes=[], edges=[], policies=None, groups=[], account='000000000000', partition='aws', metadata={})
with self.assertRaises(ValueError):
- Graph(nodes=[], edges=[], policies=[], groups=None)
+ Graph(nodes=[], edges=[], policies=[], groups=None, account='000000000000', partition='aws', metadata={})
def test_nodes(self):
with self.assertRaises(ValueError):
diff --git a/tests/test_edge_identification.py b/tests/test_edge_identification.py
index bfe44bf..750ff63 100644
--- a/tests/test_edge_identification.py
+++ b/tests/test_edge_identification.py
@@ -47,3 +47,9 @@ def test_admin_access(self):
self.assertTrue(is_connected(graph, admin_user_node, jump_user))
self.assertTrue(is_connected(graph, admin_user_node, nonassumable_role_node))
self.assertTrue(is_connected(graph, other_jump_user, other_assumable_role))
+
+ def test_mfa_assume_role(self):
+ graph = build_playground_graph()
+ source_node = graph.get_node_by_searchable_name('user/jumpuser')
+ mfa_target_node = graph.get_node_by_searchable_name('role/mfa_role_with_s3_access')
+ self.assertTrue(is_connected(graph, source_node, mfa_target_node))