diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/app/ext_runtimes/loader.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/app/ext_runtimes/loader.py index a2f5e90..1de1be2 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/app/ext_runtimes/loader.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/app/ext_runtimes/loader.py @@ -58,7 +58,7 @@ class InvalidRepositoryError(exceptions.Error): """Attempted to fetch or clone from a repository missing something basic. This gets raised if we try to fetch or clone from a repo that is either - missing a HEAD or missing both a "latest" tag and a master branch. + missing a HEAD or missing both a "latest" tag and a main branch. """ @@ -145,7 +145,7 @@ def _PullTags(local_repo, client_wrapper, target_dir): Returns: (str, dulwich.objects.Commit) The tag that was actually pulled (we try to - get "latest" but fall back to "master") and the commit object + get "latest" but fall back to "main") and the commit object associated with it. Raises: @@ -162,7 +162,7 @@ def _PullTags(local_repo, client_wrapper, target_dir): # Try to get the "latest" tag (latest released version) revision = None tag = None - for tag in ('refs/tags/latest', 'refs/heads/master'): + for tag in ('refs/tags/latest', 'refs/heads/main'): try: log.debug('looking up ref %s', tag) revision = local_repo[tag] @@ -171,7 +171,7 @@ def _PullTags(local_repo, client_wrapper, target_dir): log.warn('Unable to checkout branch %s', tag) else: - raise AssertionError('No "refs/heads/master" tag found in repository.') + raise AssertionError('No "refs/heads/main" tag found in repository.') return tag, revision @@ -256,7 +256,7 @@ def InstallRuntimeDef(url, target_dir): directory. At this time, the runtime definition url must be the URL of a git repository and we identify the tree to checkout based on 1) the presence of a "latest" tag ("refs/tags/latest") 2) if there is no "latest" tag, the - head of the "master" branch ("refs/heads/master"). 
+ head of the "main" branch ("refs/heads/main"). Args: url: (str) A URL identifying a git repository. The HTTP, TCP and local diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/api_adapter.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/api_adapter.py index 97c4074..fe19275 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/api_adapter.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/api_adapter.py @@ -472,7 +472,7 @@ class UpdateClusterOptions(object): def __init__(self, version=None, - update_master=None, + update_main=None, update_nodes=None, node_pool=None, monitoring_service=None, @@ -483,7 +483,7 @@ def __init__(self, image_type=None, locations=None): self.version = version - self.update_master = bool(update_master) + self.update_main = bool(update_main) self.update_nodes = bool(update_nodes) self.node_pool = node_pool self.monitoring_service = monitoring_service @@ -548,7 +548,7 @@ def Zone(self, cluster_ref): return cluster_ref.zone def Version(self, cluster): - return cluster.currentMasterVersion + return cluster.currentMainVersion def CreateCluster(self, cluster_ref, options): node_config = self.messages.NodeConfig() @@ -612,7 +612,7 @@ def CreateCluster(self, cluster_ref, options): cluster = self.messages.Cluster( name=cluster_ref.clusterId, nodePools=pools, - masterAuth=self.messages.MasterAuth(username=options.user, + mainAuth=self.messages.MainAuth(username=options.user, password=options.password)) if options.additional_zones: cluster.locations = sorted([cluster_ref.zone] + options.additional_zones) @@ -654,9 +654,9 @@ def UpdateCluster(self, cluster_ref, options): desiredNodeVersion=options.version, desiredNodePoolId=options.node_pool, desiredImageType=options.image_type) - elif options.update_master: + elif options.update_main: update = self.messages.ClusterUpdate( - desiredMasterVersion=options.version) + desiredMainVersion=options.version) elif options.monitoring_service: update = 
self.messages.ClusterUpdate( desiredMonitoringService=options.monitoring_service) diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/constants.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/constants.py index b425e31..860f4ea 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/constants.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/constants.py @@ -24,6 +24,6 @@ "This will create a cluster with all Kubernetes Alpha features enabled.\n" "- This cluster will not covered by the Container Engine SLA and should " "not be used for production workloads.\n" - "- You will not be able to upgrade the master or nodes.\n" + "- You will not be able to upgrade the main or nodes.\n" "- The cluster will be deleted after 30 days.\n" ) diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/kubeconfig.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/kubeconfig.py index 9aa42ca..5db47a8 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/kubeconfig.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/kubeconfig.py @@ -207,7 +207,7 @@ def _AuthProvider(name='gcp'): Constructs an auth provider config entry readable by kubectl. This tells kubectl to call out to a specific gcloud command and parse the output to - retrieve access tokens to authenticate to the kubernetes master. + retrieve access tokens to authenticate to the kubernetes main. 
Kubernetes gcp auth provider plugin at https://github.com/kubernetes/kubernetes/blob/master/plugin/pkg/client/auth/gcp/gcp.go diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/transforms.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/transforms.py index d4dea68..a10085b 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/transforms.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/transforms.py @@ -63,16 +63,16 @@ def ParseExpireTime(s): return expire_dt - times.Now(expire_dt.tzinfo) -def TransformMasterVersion(r, undefined=''): - """Returns the formatted master version. +def TransformMainVersion(r, undefined=''): + """Returns the formatted main version. Args: r: JSON-serializable object. undefined: Returns this value if the resource cannot be formatted. Returns: - The formatted master version. + The formatted main version. """ - version = r.get('currentMasterVersion', None) + version = r.get('currentMainVersion', None) if version is None: return undefined if r.get('enableKubernetesAlpha', False): @@ -91,7 +91,7 @@ def TransformMasterVersion(r, undefined=''): _TRANSFORMS = { - 'master_version': TransformMasterVersion, + 'main_version': TransformMainVersion, } diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/util.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/util.py index cc61f42..a6ecd69 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/util.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/util.py @@ -139,8 +139,8 @@ def has_ca_cert(self): @staticmethod def UseGCPAuthProvider(cluster): - return (cluster.currentMasterVersion and - dist_version.LooseVersion(cluster.currentMasterVersion) >= + return (cluster.currentMainVersion and + dist_version.LooseVersion(cluster.currentMainVersion) >= dist_version.LooseVersion(MIN_GCP_AUTH_PROVIDER_VERSION) and not properties.VALUES.container.use_client_certificate.GetBool()) @@ -211,7 +211,7 @@ def Persist(cls, 
cluster, project_id): 'project_id': project_id, 'server': 'https://' + cluster.endpoint, } - auth = cluster.masterAuth + auth = cluster.mainAuth if auth and auth.clusterCaCertificate: kwargs['ca_data'] = auth.clusterCaCertificate else: diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/dns/import_util.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/dns/import_util.py index 72881cd..07e4447 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/dns/import_util.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/dns/import_util.py @@ -74,7 +74,7 @@ def _SOATranslation(rdata, origin): Returns: str, The translation of the given SOA rdata which includes all the required - SOA fields. Note that the master NS name is left in a substitutable form + SOA fields. Note that the main NS name is left in a substitutable form because it is always provided by Cloud DNS. """ return ' '.join( @@ -230,7 +230,7 @@ def RecordSetsFromZoneFile(zone_file, domain): A (name, type) keyed dict of ResourceRecordSets that were obtained from the zone file. Note that only A, AAAA, CNAME, MX, PTR, SOA, SPF, SRV, and TXT record-sets are retrieved. Other record-set types are not supported by Cloud - DNS. Also, the master NS field for SOA records is discarded since that is + DNS. Also, the main NS field for SOA records is discarded since that is provided by Cloud DNS. """ zone_contents = zone.from_file(zone_file, domain, check_origin=False) @@ -252,7 +252,7 @@ def RecordSetsFromYamlFile(yaml_file): A (name, type) keyed dict of ResourceRecordSets that were obtained from the yaml file. Note that only A, AAAA, CNAME, MX, PTR, SOA, SPF, SRV, and TXT record-sets are retrieved. Other record-set types are not supported by Cloud - DNS. Also, the master NS field for SOA records is discarded since that is + DNS. Also, the main NS field for SOA records is discarded since that is provided by Cloud DNS. 
""" record_sets = {} @@ -272,7 +272,7 @@ def RecordSetsFromYamlFile(yaml_file): record_set.rrdatas = yaml_record_set['rrdatas'] if rdata_type is rdatatype.SOA: - # Make master NS name substitutable. + # Make main NS name substitutable. record_set.rrdatas[0] = re.sub(r'\S+', '{0}', record_set.rrdatas[0], count=1) @@ -300,14 +300,14 @@ def _RecordSetCopy(record_set): def _SOAReplacement(current_record, record_to_be_imported): - """Returns the replacement SOA record with restored master NS name. + """Returns the replacement SOA record with restored main NS name. Args: current_record: ResourceRecordSet, Current record-set. record_to_be_imported: ResourceRecordSet, Record-set to be imported. Returns: - ResourceRecordSet, the replacement SOA record with restored master NS name. + ResourceRecordSet, the replacement SOA record with restored main NS name. """ replacement = _RecordSetCopy(record_to_be_imported) replacement.rrdatas[0] = replacement.rrdatas[0].format( diff --git a/google-cloud-sdk/lib/googlecloudsdk/api_lib/sql/instances.py b/google-cloud-sdk/lib/googlecloudsdk/api_lib/sql/instances.py index 5b99a46..dc2ab88 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/api_lib/sql/instances.py +++ b/google-cloud-sdk/lib/googlecloudsdk/api_lib/sql/instances.py @@ -227,11 +227,11 @@ def ConstructInstanceFromArgs(cls, sql_messages, args, instance_resource = sql_messages.DatabaseInstance( region=region, databaseVersion=database_version, - masterInstanceName=getattr(args, 'master_instance_name', None), + mainInstanceName=getattr(args, 'main_instance_name', None), settings=settings) - if hasattr(args, 'master_instance_name'): - if args.master_instance_name: + if hasattr(args, 'main_instance_name'): + if args.main_instance_name: replication = 'ASYNCHRONOUS' activation_policy = 'ALWAYS' if hasattr(args, 'replica_type') and args.replica_type == 'FAILOVER': diff --git a/google-cloud-sdk/lib/googlecloudsdk/command_lib/container/flags.py 
b/google-cloud-sdk/lib/googlecloudsdk/command_lib/container/flags.py index b6cc228..e69df84 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/command_lib/container/flags.py +++ b/google-cloud-sdk/lib/googlecloudsdk/command_lib/container/flags.py @@ -46,7 +46,7 @@ def AddImageTypeFlag(parser, target): def AddClusterVersionFlag(parser, suppressed=False, help=None): # pylint: disable=redefined-builtin """Adds a --cluster-version flag to the given parser.""" help_text = argparse.SUPPRESS if suppressed else help or """\ -The Kubernetes version to use for the master and nodes. Defaults to +The Kubernetes version to use for the main and nodes. Defaults to server-specified. The default Kubernetes version are available using the following command. diff --git a/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/flags.py b/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/flags.py index 31cc68e..8410c00 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/flags.py +++ b/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/flags.py @@ -135,8 +135,8 @@ def GetUserArgs(local=False): cluster specification. When you use this tier, set values to configure your processing cluster according to these guidelines (using the --config flag): -* You _must_ set `TrainingInput.masterType` to specify the type of machine to - use for your master node. This is the only required setting. +* You _must_ set `TrainingInput.mainType` to specify the type of machine to + use for your main node. This is the only required setting. * You _may_ set `TrainingInput.workerCount` to specify the number of workers to use. If you specify one or more workers, you _must_ also set `TrainingInput.workerType` to specify the type of machine to use for your @@ -146,8 +146,8 @@ def GetUserArgs(local=False): _must_ also set `TrainingInput.parameterServerType` to specify the type of machine to use for your parameter servers. 
Note that all of your workers must use the same machine type, which can be different from your parameter server - type and master type. Your parameter servers must likewise use the same - machine type, which can be different from your worker type and master type.\ + type and main type. Your parameter servers must likewise use the same + machine type, which can be different from your worker type and main type.\ """} SCALE_TIER = base.Argument( '--scale-tier', diff --git a/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/local_train.py b/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/local_train.py index 7e61fd4..40376df 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/local_train.py +++ b/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/local_train.py @@ -30,7 +30,7 @@ def MakeProcess(module_name, **extra_popen_args): """Make a Popen object that runs the module, with the correct env. - If task_type is 'master' instead replaces the current process with the + If task_type is 'main' instead replaces the current process with the subprocess via execution_utils.Exec Args: module_name: str. Name of the module to run, e.g. trainer.task @@ -46,7 +46,7 @@ def MakeProcess(module_name, Returns: a subprocess.Popen object corresponding to the subprocesses or an int corresponding to the return value of the subprocess - (if task_type is 'master') + (if task_type is 'main') """ if args is None: args = [] @@ -68,7 +68,7 @@ def MakeProcess(module_name, # configuration options to the training module. the module specific # arguments are passed as comand line arguments. env['TF_CONFIG'] = json.dumps(config) - if task_type == 'master': + if task_type == 'main': return execution_utils.Exec( cmd, env=env, no_exit=True, cwd=package_root, **extra_popen_args) else: @@ -100,18 +100,18 @@ def RunDistributed(module_name, user_args: [str]. Additional user args for the task. Any relative paths will not work. Returns: - int. the retval of 'master' subprocess + int. 
the retval of 'main' subprocess """ ports = range(start_port, start_port + num_ps + num_workers + 1) cluster = { - 'master': ['localhost:{port}'.format(port=ports[0])], + 'main': ['localhost:{port}'.format(port=ports[0])], 'ps': ['localhost:{port}'.format(port=p) for p in ports[1:num_ps + 1]], 'worker': ['localhost:{port}'.format(port=p) for p in ports[num_ps + 1:]] } for task_type, addresses in cluster.items(): - if task_type != 'master': + if task_type != 'main': for i in range(len(addresses)): MakeProcess(module_name, package_root, @@ -122,6 +122,6 @@ def RunDistributed(module_name, return MakeProcess(module_name, package_root, args=user_args, - task_type='master', + task_type='main', index=0, cluster=cluster) diff --git a/google-cloud-sdk/lib/googlecloudsdk/command_lib/shell/gcloud_tree.py b/google-cloud-sdk/lib/googlecloudsdk/command_lib/shell/gcloud_tree.py index 60a4705..63b0252 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/command_lib/shell/gcloud_tree.py +++ b/google-cloud-sdk/lib/googlecloudsdk/command_lib/shell/gcloud_tree.py @@ -26348,7 +26348,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u'The Kubernetes version to use for the master and nodes. Defaults to\nserver-specified.\n+\nThe default Kubernetes version are available using the following command.\n+\n $ gcloud container get-server-config', + u'description': u'The Kubernetes version to use for the main and nodes. 
Defaults to\nserver-specified.\n+\nThe default Kubernetes version are available using the following command.\n+\n $ gcloud container get-server-config', u'group': u'', u'hidden': False, u'name': u'--cluster-version', @@ -27344,7 +27344,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u"The Kubernetes release version to which to upgrade the cluster's nodes.\n+\nIf provided, the --cluster-version must be no greater than the cluster\nmaster's minor version (x.*X*.x), and must be a latest patch version\n(x.x.*X*).\n+\nYou can find the list of allowed versions for upgrades by running:\n+\n $ gcloud container get-server-config", + u'description': u"The Kubernetes release version to which to upgrade the cluster's nodes.\n+\nIf provided, the --cluster-version must be no greater than the cluster\nmain's minor version (x.*X*.x), and must be a latest patch version\n(x.x.*X*).\n+\nYou can find the list of allowed versions for upgrades by running:\n+\n $ gcloud container get-server-config", u'group': u'', u'hidden': False, u'name': u'--cluster-version', @@ -27365,15 +27365,15 @@ u'required': False, u'type': u'string', u'value': u'IMAGE_TYPE'}, - u'--master': {u'attr': {}, + u'--main': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': False, - u'description': u"Upgrade the cluster's master to the latest version of Kubernetes supported on Container Engine. Nodes cannot be upgraded at the same time as the master.", + u'description': u"Upgrade the cluster's main to the latest version of Kubernetes supported on Container Engine. 
Nodes cannot be upgraded at the same time as the main.", u'group': u'', u'hidden': False, - u'name': u'--master', + u'name': u'--main', u'nargs': u'0', u'required': False, u'type': u'bool', @@ -27391,7 +27391,7 @@ u'required': False, u'type': u'bool', u'value': u'NO_ASYNC'}, - u'--no-master': {u'attr': {}, + u'--no-main': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -27399,7 +27399,7 @@ u'description': u'', u'group': u'', u'hidden': True, - u'name': u'--no-master', + u'name': u'--no-main', u'nargs': u'0', u'required': False, u'type': u'bool', @@ -27460,8 +27460,8 @@ u'required': False, u'value': u'NAME'}], u'release': u'ALPHA', - u'sections': {u'DESCRIPTION': u"Upgrades the Kubernetes version of an existing container cluster.\n\nThis command upgrades the Kubernetes version of the *nodes* of a cluster.\nThe Kubernetes version of the cluster's *master* is periodically upgraded\nautomatically as new releases are available.\n\n*By running this command, all of the cluster's nodes will be deleted and*\n*recreated one at a time.* While persistent Kubernetes resources, such as\npods backed by replication controllers, will be rescheduled onto new nodes,\na small cluster may experience a few minutes where there are insufficient\nnodes available to run all of the scheduled Kubernetes resources.\n\n*Please ensure that any data you wish to keep is stored on a persistent*\n*disk before upgrading the cluster.* Ephemeral Kubernetes resources--in\nparticular, pods without replication controllers--will be lost, while\npersistent Kubernetes resources will get rescheduled.\n", - u'EXAMPLES': u'Upgrade the nodes of to the Kubernetes version of the cluster\'s\nmaster.\n\n $ upgrade \n\nUpgrade the nodes of to Kubernetes version x.y.z.\n\n $ upgrade --cluster-version "x.y.z"\n'}}}, + u'sections': {u'DESCRIPTION': u"Upgrades the Kubernetes version of an existing container cluster.\n\nThis command upgrades the Kubernetes version of the *nodes* of a cluster.\nThe 
Kubernetes version of the cluster's *main* is periodically upgraded\nautomatically as new releases are available.\n\n*By running this command, all of the cluster's nodes will be deleted and*\n*recreated one at a time.* While persistent Kubernetes resources, such as\npods backed by replication controllers, will be rescheduled onto new nodes,\na small cluster may experience a few minutes where there are insufficient\nnodes available to run all of the scheduled Kubernetes resources.\n\n*Please ensure that any data you wish to keep is stored on a persistent*\n*disk before upgrading the cluster.* Ephemeral Kubernetes resources--in\nparticular, pods without replication controllers--will be lost, while\npersistent Kubernetes resources will get rescheduled.\n", + u'EXAMPLES': u'Upgrade the nodes of to the Kubernetes version of the cluster\'s\nmain.\n\n $ upgrade \n\nUpgrade the nodes of to Kubernetes version x.y.z.\n\n $ upgrade --cluster-version "x.y.z"\n'}}}, u'flags': {u'--zone': {u'attr': {u'property': {u'name': u'compute/zone'}}, u'category': u'', u'choices': [], @@ -30441,7 +30441,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u'The branch that will be used to get the source code of the function. The most recent revision on this branch will be used. Can be specified only together with --source-url parameter. If not specified defaults to `master`.', + u'description': u'The branch that will be used to get the source code of the function. The most recent revision on this branch will be used. Can be specified only together with --source-url parameter. If not specified defaults to `main`.', u'group': u'deploy.2', u'hidden': False, u'name': u'--source-branch', @@ -30493,7 +30493,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u'The Url of a remote repository that holds the function being deployed. 
It is of the form: https://source.developers.google.com/p/{project_id}/r/{repo_name}/, where you should substitute your data for values inside the curly brackets. You can omit "r/{repo_name}/" in which case the "default" repository is taken. One of the parameters --source-revision, --source-branch, or --source-tag can be given to specify the version in the repository. If none of them are provided, the last revision from the master branch is used. If this parameter is given, the parameter --source is required and describes the path inside the repository.', + u'description': u'The Url of a remote repository that holds the function being deployed. It is of the form: https://source.developers.google.com/p/{project_id}/r/{repo_name}/, where you should substitute your data for values inside the curly brackets. You can omit "r/{repo_name}/" in which case the "default" repository is taken. One of the parameters --source-revision, --source-branch, or --source-tag can be given to specify the version in the repository. If none of them are provided, the last revision from the main branch is used. 
If this parameter is given, the parameter --source is required and describes the path inside the repository.', u'group': u'deploy.3', u'hidden': False, u'name': u'--source-url', @@ -33255,7 +33255,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u'A comma-delimited list of URI patterns referencing existing VCF or MasterVar files in Google Cloud Storage.', + u'description': u'A comma-delimited list of URI patterns referencing existing VCF or MainVar files in Google Cloud Storage.', u'group': u'', u'hidden': False, u'name': u'--source-uris', @@ -33287,7 +33287,7 @@ u'import'], u'positionals': [], u'release': u'ALPHA', - u'sections': {u'DESCRIPTION': u'*(ALPHA)* Import variants from VCF or MasterVar files that are in Google Cloud Storage.'}}, + u'sections': {u'DESCRIPTION': u'*(ALPHA)* Import variants from VCF or MainVar files that are in Google Cloud Storage.'}}, u'list': {u'capsule': u'*(ALPHA)* Lists variants that match the search criteria.', u'commands': {}, u'flags': {u'--call-set-ids': {u'attr': {}, @@ -34395,7 +34395,7 @@ u'STANDARD_1'], u'completer': u'', u'default': None, - u'description': u'Specifies the machine types, the number of replicas for workers and parameter servers. _SCALE_TIER_ must be one of:\n+\n*BASIC*::: A single worker instance. This tier is suitable for learning how to use Cloud ML, and for experimenting with new models using small datasets.\n*BASIC_GPU*::: A single worker instance with a GPU.\n*CUSTOM*::: The CUSTOM tier is not a set tier, but rather enables you to use your own\ncluster specification. When you use this tier, set values to configure your\nprocessing cluster according to these guidelines (using the --config flag):\n+\n* You _must_ set `TrainingInput.masterType` to specify the type of machine to\n use for your master node. This is the only required setting.\n* You _may_ set `TrainingInput.workerCount` to specify the number of workers to\n use. 
If you specify one or more workers, you _must_ also set\n `TrainingInput.workerType` to specify the type of machine to use for your\n worker nodes.\n* You _may_ set `TrainingInput.parameterServerCount` to specify the number of\n parameter servers to use. If you specify one or more parameter servers, you\n _must_ also set `TrainingInput.parameterServerType` to specify the type of\n machine to use for your parameter servers. Note that all of your workers must\n use the same machine type, which can be different from your parameter server\n type and master type. Your parameter servers must likewise use the same\n machine type, which can be different from your worker type and master type.\n*PREMIUM_1*::: A large number of workers with many parameter servers.\n*STANDARD_1*::: Many workers and a few parameter servers.\n+', + u'description': u'Specifies the machine types, the number of replicas for workers and parameter servers. _SCALE_TIER_ must be one of:\n+\n*BASIC*::: A single worker instance. This tier is suitable for learning how to use Cloud ML, and for experimenting with new models using small datasets.\n*BASIC_GPU*::: A single worker instance with a GPU.\n*CUSTOM*::: The CUSTOM tier is not a set tier, but rather enables you to use your own\ncluster specification. When you use this tier, set values to configure your\nprocessing cluster according to these guidelines (using the --config flag):\n+\n* You _must_ set `TrainingInput.mainType` to specify the type of machine to\n use for your main node. This is the only required setting.\n* You _may_ set `TrainingInput.workerCount` to specify the number of workers to\n use. If you specify one or more workers, you _must_ also set\n `TrainingInput.workerType` to specify the type of machine to use for your\n worker nodes.\n* You _may_ set `TrainingInput.parameterServerCount` to specify the number of\n parameter servers to use. 
If you specify one or more parameter servers, you\n _must_ also set `TrainingInput.parameterServerType` to specify the type of\n machine to use for your parameter servers. Note that all of your workers must\n use the same machine type, which can be different from your parameter server\n type and main type. Your parameter servers must likewise use the same\n machine type, which can be different from your worker type and main type.\n*PREMIUM_1*::: A large number of workers with many parameter servers.\n*STANDARD_1*::: Many workers and a few parameter servers.\n+', u'group': u'', u'hidden': False, u'name': u'--scale-tier', @@ -38661,7 +38661,7 @@ u'value': u'DIRECTORY_NAME'}], u'release': u'ALPHA', u'sections': {u'DESCRIPTION': u'This command clones git repository for the currently active\nGoogle Cloud Platform project into the specified folder in the\ncurrent directory.\n', - u'EXAMPLES': u"To use the default Google Cloud repository for development, use the\nfollowing commands. We recommend that you use your project name as\nTARGET_DIR to make it apparent which directory is used for which\nproject. We also recommend to clone the repository named 'default'\nsince it is automatically created for each project, and its\ncontents can be browsed and edited in the Developers Console.\n\n $ gcloud init\n $ gcloud source repos clone default TARGET_DIR\n $ cd TARGET_DIR\n ... create/edit files and create one or more commits ...\n $ git push origin master\n"}}, + u'EXAMPLES': u"To use the default Google Cloud repository for development, use the\nfollowing commands. We recommend that you use your project name as\nTARGET_DIR to make it apparent which directory is used for which\nproject. We also recommend to clone the repository named 'default'\nsince it is automatically created for each project, and its\ncontents can be browsed and edited in the Developers Console.\n\n $ gcloud init\n $ gcloud source repos clone default TARGET_DIR\n $ cd TARGET_DIR\n ... 
create/edit files and create one or more commits ...\n $ git push origin main\n"}}, u'create': {u'capsule': u'*(ALPHA)* Create a named git repo for the project in the current directory.', u'commands': {}, u'flags': {}, @@ -66490,7 +66490,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u'The Kubernetes version to use for the master and nodes. Defaults to\nserver-specified.\n+\nThe default Kubernetes version are available using the following command.\n+\n $ gcloud container get-server-config', + u'description': u'The Kubernetes version to use for the main and nodes. Defaults to\nserver-specified.\n+\nThe default Kubernetes version are available using the following command.\n+\n $ gcloud container get-server-config', u'group': u'', u'hidden': False, u'name': u'--cluster-version', @@ -67486,7 +67486,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u"The Kubernetes release version to which to upgrade the cluster's nodes.\n+\nIf provided, the --cluster-version must be no greater than the cluster\nmaster's minor version (x.*X*.x), and must be a latest patch version\n(x.x.*X*).\n+\nYou can find the list of allowed versions for upgrades by running:\n+\n $ gcloud container get-server-config", + u'description': u"The Kubernetes release version to which to upgrade the cluster's nodes.\n+\nIf provided, the --cluster-version must be no greater than the cluster\nmain's minor version (x.*X*.x), and must be a latest patch version\n(x.x.*X*).\n+\nYou can find the list of allowed versions for upgrades by running:\n+\n $ gcloud container get-server-config", u'group': u'', u'hidden': False, u'name': u'--cluster-version', @@ -67507,15 +67507,15 @@ u'required': False, u'type': u'string', u'value': u'IMAGE_TYPE'}, - u'--master': {u'attr': {}, + u'--main': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': False, - u'description': u"Upgrade the cluster's master to the latest version of Kubernetes supported 
on Container Engine. Nodes cannot be upgraded at the same time as the master.", + u'description': u"Upgrade the cluster's main to the latest version of Kubernetes supported on Container Engine. Nodes cannot be upgraded at the same time as the main.", u'group': u'', u'hidden': False, - u'name': u'--master', + u'name': u'--main', u'nargs': u'0', u'required': False, u'type': u'bool', @@ -67533,7 +67533,7 @@ u'required': False, u'type': u'bool', u'value': u'NO_ASYNC'}, - u'--no-master': {u'attr': {}, + u'--no-main': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -67541,7 +67541,7 @@ u'description': u'', u'group': u'', u'hidden': True, - u'name': u'--no-master', + u'name': u'--no-main', u'nargs': u'0', u'required': False, u'type': u'bool', @@ -67602,8 +67602,8 @@ u'required': False, u'value': u'NAME'}], u'release': u'BETA', - u'sections': {u'DESCRIPTION': u"Upgrades the Kubernetes version of an existing container cluster.\n\nThis command upgrades the Kubernetes version of the *nodes* of a cluster.\nThe Kubernetes version of the cluster's *master* is periodically upgraded\nautomatically as new releases are available.\n\n*By running this command, all of the cluster's nodes will be deleted and*\n*recreated one at a time.* While persistent Kubernetes resources, such as\npods backed by replication controllers, will be rescheduled onto new nodes,\na small cluster may experience a few minutes where there are insufficient\nnodes available to run all of the scheduled Kubernetes resources.\n\n*Please ensure that any data you wish to keep is stored on a persistent*\n*disk before upgrading the cluster.* Ephemeral Kubernetes resources--in\nparticular, pods without replication controllers--will be lost, while\npersistent Kubernetes resources will get rescheduled.\n", - u'EXAMPLES': u'Upgrade the nodes of to the Kubernetes version of the cluster\'s\nmaster.\n\n $ upgrade \n\nUpgrade the nodes of to Kubernetes version x.y.z.\n\n $ upgrade --cluster-version 
"x.y.z"\n'}}}, + u'sections': {u'DESCRIPTION': u"Upgrades the Kubernetes version of an existing container cluster.\n\nThis command upgrades the Kubernetes version of the *nodes* of a cluster.\nThe Kubernetes version of the cluster's *main* is periodically upgraded\nautomatically as new releases are available.\n\n*By running this command, all of the cluster's nodes will be deleted and*\n*recreated one at a time.* While persistent Kubernetes resources, such as\npods backed by replication controllers, will be rescheduled onto new nodes,\na small cluster may experience a few minutes where there are insufficient\nnodes available to run all of the scheduled Kubernetes resources.\n\n*Please ensure that any data you wish to keep is stored on a persistent*\n*disk before upgrading the cluster.* Ephemeral Kubernetes resources--in\nparticular, pods without replication controllers--will be lost, while\npersistent Kubernetes resources will get rescheduled.\n", + u'EXAMPLES': u'Upgrade the nodes of to the Kubernetes version of the cluster\'s\nmain.\n\n $ upgrade \n\nUpgrade the nodes of to Kubernetes version x.y.z.\n\n $ upgrade --cluster-version "x.y.z"\n'}}}, u'flags': {u'--zone': {u'attr': {u'property': {u'name': u'compute/zone'}}, u'category': u'', u'choices': [], @@ -69540,7 +69540,7 @@ u'required': False, u'type': u'dict', u'value': u'KEY=VALUE'}, - u'--master-boot-disk-size': {u'attr': {}, + u'--main-boot-disk-size': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -69548,12 +69548,12 @@ u'description': u"The size of the boot disk. The value must be a\nwhole number followed by a size unit of ``KB'' for kilobyte, ``MB''\nfor megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,\n``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk\ncan have is 10 GB. 
Disk size must be a multiple of 1 GB.", u'group': u'create.1', u'hidden': False, - u'name': u'--master-boot-disk-size', + u'name': u'--main-boot-disk-size', u'nargs': u'0', u'required': False, u'type': u'string', u'value': u'MASTER_BOOT_DISK_SIZE'}, - u'--master-boot-disk-size-gb': {u'attr': {}, + u'--main-boot-disk-size-gb': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -69561,20 +69561,20 @@ u'description': u'', u'group': u'create.1', u'hidden': True, - u'name': u'--master-boot-disk-size-gb', + u'name': u'--main-boot-disk-size-gb', u'nargs': u'0', u'required': False, u'type': u'string', u'value': u'MASTER_BOOT_DISK_SIZE_GB'}, - u'--master-machine-type': {u'attr': {}, + u'--main-machine-type': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': None, - u'description': u'The type of machine to use for the master. Defaults to server-specified.', + u'description': u'The type of machine to use for the main. Defaults to server-specified.', u'group': u'', u'hidden': False, - u'name': u'--master-machine-type', + u'name': u'--main-machine-type', u'nargs': u'0', u'required': False, u'type': u'string', @@ -69618,28 +69618,28 @@ u'required': False, u'type': u'bool', u'value': u'NO_ASYNC'}, - u'--num-master-local-ssds': {u'attr': {}, + u'--num-main-local-ssds': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': None, - u'description': u'The number of local SSDs to attach to the master in a cluster.', + u'description': u'The number of local SSDs to attach to the main in a cluster.', u'group': u'', u'hidden': False, - u'name': u'--num-master-local-ssds', + u'name': u'--num-main-local-ssds', u'nargs': u'0', u'required': False, u'type': u'string', u'value': u'NUM_MASTER_LOCAL_SSDS'}, - u'--num-masters': {u'attr': {}, + u'--num-mains': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': None, - u'description': u'The number of master nodes in the 
cluster.\n+\n[format="csv",options="header"]\n|========\nNumber of Masters,Cluster Mode\n1,Standard\n3,High Availability\n|========', + u'description': u'The number of main nodes in the cluster.\n+\n[format="csv",options="header"]\n|========\nNumber of Mains,Cluster Mode\n1,Standard\n3,High Availability\n|========', u'group': u'', u'hidden': False, - u'name': u'--num-masters', + u'name': u'--num-mains', u'nargs': u'0', u'required': False, u'type': u'string', @@ -74135,7 +74135,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u'The branch that will be used to get the source code of the function. The most recent revision on this branch will be used. Can be specified only together with --source-url parameter. If not specified defaults to `master`.', + u'description': u'The branch that will be used to get the source code of the function. The most recent revision on this branch will be used. Can be specified only together with --source-url parameter. If not specified defaults to `main`.', u'group': u'deploy.2', u'hidden': False, u'name': u'--source-branch', @@ -74187,7 +74187,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u'The Url of a remote repository that holds the function being deployed. It is of the form: https://source.developers.google.com/p/{project_id}/r/{repo_name}/, where you should substitute your data for values inside the curly brackets. You can omit "r/{repo_name}/" in which case the "default" repository is taken. One of the parameters --source-revision, --source-branch, or --source-tag can be given to specify the version in the repository. If none of them are provided, the last revision from the master branch is used. If this parameter is given, the parameter --source is required and describes the path inside the repository.', + u'description': u'The Url of a remote repository that holds the function being deployed. 
It is of the form: https://source.developers.google.com/p/{project_id}/r/{repo_name}/, where you should substitute your data for values inside the curly brackets. You can omit "r/{repo_name}/" in which case the "default" repository is taken. One of the parameters --source-revision, --source-branch, or --source-tag can be given to specify the version in the repository. If none of them are provided, the last revision from the main branch is used. If this parameter is given, the parameter --source is required and describes the path inside the repository.', u'group': u'deploy.3', u'hidden': False, u'name': u'--source-url', @@ -78389,7 +78389,7 @@ u'STANDARD_1'], u'completer': u'', u'default': None, - u'description': u'Specifies the machine types, the number of replicas for workers and parameter servers. _SCALE_TIER_ must be one of:\n+\n*BASIC*::: A single worker instance. This tier is suitable for learning how to use Cloud ML, and for experimenting with new models using small datasets.\n*BASIC_GPU*::: A single worker instance with a GPU.\n*CUSTOM*::: The CUSTOM tier is not a set tier, but rather enables you to use your own\ncluster specification. When you use this tier, set values to configure your\nprocessing cluster according to these guidelines (using the --config flag):\n+\n* You _must_ set `TrainingInput.masterType` to specify the type of machine to\n use for your master node. This is the only required setting.\n* You _may_ set `TrainingInput.workerCount` to specify the number of workers to\n use. If you specify one or more workers, you _must_ also set\n `TrainingInput.workerType` to specify the type of machine to use for your\n worker nodes.\n* You _may_ set `TrainingInput.parameterServerCount` to specify the number of\n parameter servers to use. If you specify one or more parameter servers, you\n _must_ also set `TrainingInput.parameterServerType` to specify the type of\n machine to use for your parameter servers. 
Note that all of your workers must\n use the same machine type, which can be different from your parameter server\n type and master type. Your parameter servers must likewise use the same\n machine type, which can be different from your worker type and master type.\n*PREMIUM_1*::: A large number of workers with many parameter servers.\n*STANDARD_1*::: Many workers and a few parameter servers.\n+', + u'description': u'Specifies the machine types, the number of replicas for workers and parameter servers. _SCALE_TIER_ must be one of:\n+\n*BASIC*::: A single worker instance. This tier is suitable for learning how to use Cloud ML, and for experimenting with new models using small datasets.\n*BASIC_GPU*::: A single worker instance with a GPU.\n*CUSTOM*::: The CUSTOM tier is not a set tier, but rather enables you to use your own\ncluster specification. When you use this tier, set values to configure your\nprocessing cluster according to these guidelines (using the --config flag):\n+\n* You _must_ set `TrainingInput.mainType` to specify the type of machine to\n use for your main node. This is the only required setting.\n* You _may_ set `TrainingInput.workerCount` to specify the number of workers to\n use. If you specify one or more workers, you _must_ also set\n `TrainingInput.workerType` to specify the type of machine to use for your\n worker nodes.\n* You _may_ set `TrainingInput.parameterServerCount` to specify the number of\n parameter servers to use. If you specify one or more parameter servers, you\n _must_ also set `TrainingInput.parameterServerType` to specify the type of\n machine to use for your parameter servers. Note that all of your workers must\n use the same machine type, which can be different from your parameter server\n type and main type. 
Your parameter servers must likewise use the same\n machine type, which can be different from your worker type and main type.\n*PREMIUM_1*::: A large number of workers with many parameter servers.\n*STANDARD_1*::: Many workers and a few parameter servers.\n+', u'group': u'', u'hidden': False, u'name': u'--scale-tier', @@ -82991,7 +82991,7 @@ u'value': u'DIRECTORY_NAME'}], u'release': u'BETA', u'sections': {u'DESCRIPTION': u'This command clones git repository for the currently active\nGoogle Cloud Platform project into the specified folder in the\ncurrent directory.\n', - u'EXAMPLES': u"To use the default Google Cloud repository for development, use the\nfollowing commands. We recommend that you use your project name as\nTARGET_DIR to make it apparent which directory is used for which\nproject. We also recommend to clone the repository named 'default'\nsince it is automatically created for each project, and its\ncontents can be browsed and edited in the Developers Console.\n\n $ gcloud init\n $ gcloud source repos clone default TARGET_DIR\n $ cd TARGET_DIR\n ... create/edit files and create one or more commits ...\n $ git push origin master\n"}}}, + u'EXAMPLES': u"To use the default Google Cloud repository for development, use the\nfollowing commands. We recommend that you use your project name as\nTARGET_DIR to make it apparent which directory is used for which\nproject. We also recommend to clone the repository named 'default'\nsince it is automatically created for each project, and its\ncontents can be browsed and edited in the Developers Console.\n\n $ gcloud init\n $ gcloud source repos clone default TARGET_DIR\n $ cd TARGET_DIR\n ... 
create/edit files and create one or more commits ...\n $ git push origin main\n"}}}, u'flags': {}, u'group': True, u'groups': {}, @@ -85350,15 +85350,15 @@ u'required': False, u'type': u'string', u'value': u'MAINTENANCE_WINDOW_HOUR'}, - u'--master-instance-name': {u'attr': {}, + u'--main-instance-name': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': None, - u'description': u'Name of the instance which will act as master in the replication setup. The newly created instance will be a read replica of the specified master instance.', + u'description': u'Name of the instance which will act as main in the replication setup. The newly created instance will be a read replica of the specified main instance.', u'group': u'', u'hidden': False, - u'name': u'--master-instance-name', + u'name': u'--main-instance-name', u'nargs': u'0', u'required': False, u'type': u'string', @@ -108562,7 +108562,7 @@ u'choices': [], u'completer': u'', u'default': None, - u'description': u"The Kubernetes release version to which to upgrade the cluster's nodes.\n+\nIf provided, the --cluster-version must be no greater than the cluster\nmaster's minor version (x.*X*.x), and must be a latest patch version\n(x.x.*X*).\n+\nYou can find the list of allowed versions for upgrades by running:\n+\n $ gcloud container get-server-config", + u'description': u"The Kubernetes release version to which to upgrade the cluster's nodes.\n+\nIf provided, the --cluster-version must be no greater than the cluster\nmain's minor version (x.*X*.x), and must be a latest patch version\n(x.x.*X*).\n+\nYou can find the list of allowed versions for upgrades by running:\n+\n $ gcloud container get-server-config", u'group': u'', u'hidden': False, u'name': u'--cluster-version', @@ -108583,15 +108583,15 @@ u'required': False, u'type': u'string', u'value': u'IMAGE_TYPE'}, - u'--master': {u'attr': {}, + u'--main': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': False, - 
u'description': u"Upgrade the cluster's master to the latest version of Kubernetes supported on Container Engine. Nodes cannot be upgraded at the same time as the master.", + u'description': u"Upgrade the cluster's main to the latest version of Kubernetes supported on Container Engine. Nodes cannot be upgraded at the same time as the main.", u'group': u'', u'hidden': False, - u'name': u'--master', + u'name': u'--main', u'nargs': u'0', u'required': False, u'type': u'bool', @@ -108609,7 +108609,7 @@ u'required': False, u'type': u'bool', u'value': u'NO_ASYNC'}, - u'--no-master': {u'attr': {}, + u'--no-main': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -108617,7 +108617,7 @@ u'description': u'', u'group': u'', u'hidden': True, - u'name': u'--no-master', + u'name': u'--no-main', u'nargs': u'0', u'required': False, u'type': u'bool', @@ -108677,8 +108677,8 @@ u'required': False, u'value': u'NAME'}], u'release': u'GA', - u'sections': {u'DESCRIPTION': u"Upgrades the Kubernetes version of an existing container cluster.\n\nThis command upgrades the Kubernetes version of the *nodes* of a cluster.\nThe Kubernetes version of the cluster's *master* is periodically upgraded\nautomatically as new releases are available.\n\n*By running this command, all of the cluster's nodes will be deleted and*\n*recreated one at a time.* While persistent Kubernetes resources, such as\npods backed by replication controllers, will be rescheduled onto new nodes,\na small cluster may experience a few minutes where there are insufficient\nnodes available to run all of the scheduled Kubernetes resources.\n\n*Please ensure that any data you wish to keep is stored on a persistent*\n*disk before upgrading the cluster.* Ephemeral Kubernetes resources--in\nparticular, pods without replication controllers--will be lost, while\npersistent Kubernetes resources will get rescheduled.\n", - u'EXAMPLES': u'Upgrade the nodes of to the Kubernetes version of the cluster\'s\nmaster.\n\n $ 
upgrade \n\nUpgrade the nodes of to Kubernetes version x.y.z.\n\n $ upgrade --cluster-version "x.y.z"\n'}}}, + u'sections': {u'DESCRIPTION': u"Upgrades the Kubernetes version of an existing container cluster.\n\nThis command upgrades the Kubernetes version of the *nodes* of a cluster.\nThe Kubernetes version of the cluster's *main* is periodically upgraded\nautomatically as new releases are available.\n\n*By running this command, all of the cluster's nodes will be deleted and*\n*recreated one at a time.* While persistent Kubernetes resources, such as\npods backed by replication controllers, will be rescheduled onto new nodes,\na small cluster may experience a few minutes where there are insufficient\nnodes available to run all of the scheduled Kubernetes resources.\n\n*Please ensure that any data you wish to keep is stored on a persistent*\n*disk before upgrading the cluster.* Ephemeral Kubernetes resources--in\nparticular, pods without replication controllers--will be lost, while\npersistent Kubernetes resources will get rescheduled.\n", + u'EXAMPLES': u'Upgrade the nodes of to the Kubernetes version of the cluster\'s\nmain.\n\n $ upgrade \n\nUpgrade the nodes of to Kubernetes version x.y.z.\n\n $ upgrade --cluster-version "x.y.z"\n'}}}, u'flags': {u'--zone': {u'attr': {u'property': {u'name': u'compute/zone'}}, u'category': u'', u'choices': [], @@ -109905,7 +109905,7 @@ u'required': False, u'type': u'dict', u'value': u'KEY=VALUE'}, - u'--master-boot-disk-size': {u'attr': {}, + u'--main-boot-disk-size': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -109913,12 +109913,12 @@ u'description': u"The size of the boot disk. The value must be a\nwhole number followed by a size unit of ``KB'' for kilobyte, ``MB''\nfor megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,\n``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk\ncan have is 10 GB. 
Disk size must be a multiple of 1 GB.", u'group': u'create.1', u'hidden': False, - u'name': u'--master-boot-disk-size', + u'name': u'--main-boot-disk-size', u'nargs': u'0', u'required': False, u'type': u'string', u'value': u'MASTER_BOOT_DISK_SIZE'}, - u'--master-boot-disk-size-gb': {u'attr': {}, + u'--main-boot-disk-size-gb': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -109926,20 +109926,20 @@ u'description': u'', u'group': u'create.1', u'hidden': True, - u'name': u'--master-boot-disk-size-gb', + u'name': u'--main-boot-disk-size-gb', u'nargs': u'0', u'required': False, u'type': u'string', u'value': u'MASTER_BOOT_DISK_SIZE_GB'}, - u'--master-machine-type': {u'attr': {}, + u'--main-machine-type': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': None, - u'description': u'The type of machine to use for the master. Defaults to server-specified.', + u'description': u'The type of machine to use for the main. Defaults to server-specified.', u'group': u'', u'hidden': False, - u'name': u'--master-machine-type', + u'name': u'--main-machine-type', u'nargs': u'0', u'required': False, u'type': u'string', @@ -109983,20 +109983,20 @@ u'required': False, u'type': u'bool', u'value': u'NO_ASYNC'}, - u'--num-master-local-ssds': {u'attr': {}, + u'--num-main-local-ssds': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': None, - u'description': u'The number of local SSDs to attach to the master in a cluster.', + u'description': u'The number of local SSDs to attach to the main in a cluster.', u'group': u'', u'hidden': False, - u'name': u'--num-master-local-ssds', + u'name': u'--num-main-local-ssds', u'nargs': u'0', u'required': False, u'type': u'string', u'value': u'NUM_MASTER_LOCAL_SSDS'}, - u'--num-masters': {u'attr': {}, + u'--num-mains': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', @@ -110004,7 +110004,7 @@ u'description': u'', u'group': u'', u'hidden': True, - u'name': 
u'--num-masters', + u'name': u'--num-mains', u'nargs': u'0', u'required': False, u'type': u'string', @@ -117234,7 +117234,7 @@ u'value': u'DIRECTORY_NAME'}], u'release': u'GA', u'sections': {u'DESCRIPTION': u'This command clones git repository for the currently active\nGoogle Cloud Platform project into the specified folder in the\ncurrent directory.\n', - u'EXAMPLES': u"To use the default Google Cloud repository for development, use the\nfollowing commands. We recommend that you use your project name as\nTARGET_DIR to make it apparent which directory is used for which\nproject. We also recommend to clone the repository named 'default'\nsince it is automatically created for each project, and its\ncontents can be browsed and edited in the Developers Console.\n\n $ gcloud init\n $ gcloud source repos clone default TARGET_DIR\n $ cd TARGET_DIR\n ... create/edit files and create one or more commits ...\n $ git push origin master\n"}}}, + u'EXAMPLES': u"To use the default Google Cloud repository for development, use the\nfollowing commands. We recommend that you use your project name as\nTARGET_DIR to make it apparent which directory is used for which\nproject. We also recommend to clone the repository named 'default'\nsince it is automatically created for each project, and its\ncontents can be browsed and edited in the Developers Console.\n\n $ gcloud init\n $ gcloud source repos clone default TARGET_DIR\n $ cd TARGET_DIR\n ... create/edit files and create one or more commits ...\n $ git push origin main\n"}}}, u'flags': {}, u'group': True, u'groups': {}, @@ -117746,15 +117746,15 @@ u'required': False, u'type': u'string', u'value': u'GCE_ZONE'}, - u'--master-instance-name': {u'attr': {}, + u'--main-instance-name': {u'attr': {}, u'category': u'', u'choices': [], u'completer': u'', u'default': None, - u'description': u'Name of the instance which will act as master in the replication setup. 
The newly created instance will be a read replica of the specified master instance.', + u'description': u'Name of the instance which will act as main in the replication setup. The newly created instance will be a read replica of the specified main instance.', u'group': u'', u'hidden': False, - u'name': u'--master-instance-name', + u'name': u'--main-instance-name', u'nargs': u'0', u'required': False, u'type': u'string', @@ -119412,7 +119412,7 @@ u'projections'], u'positionals': [], u'release': u'GA', - u'sections': {u'DESCRIPTION': u'Most *gcloud* commands return a list of resources on success. By default they\nare pretty-printed on the standard output. The\n*--format=*_NAME_[_ATTRIBUTES_]*(*_PROJECTION_*)* flag changes the default\noutput:\n\n_NAME_:: The format name.\n\n_ATTRIBUTES_:: Format specific attributes. For details run $ gcloud topic formats.\n\n_PROJECTION_:: A list of resource keys that selects the data listed. Resource projections are described in detail below.\n\n_resource keys_:: Keys are names for resource resource items. For details run $ gcloud topic resource-keys.\n\nMost *gcloud* *list* commands have a *--filter=*_EXPRESSION_ flag that\nselects resources to be listed. For details run $ gcloud topic filters.\n\n\n### Projections\n\nA projection is a list of keys that selects resource data values.\nProjections are used in *--format* flag expressions. For example, the\n*table* format requires a projection that describes the table columns:\n\n table(name, network.ip.internal, network.ip.external, uri())\n\n### Transforms\n\nA *transform* formats resource data values. Each projection key may\nhave zero or more transform calls:\n\n _key_._transform_([arg...])...\n\nThis example applies the *foo*() and then the *bar*() transform to the\n*status.time* resource value:\n\n (name, status.time.foo().bar())\n\n\nThe builtin transform functions are:\n\n\n*always*()::\nMarks a transform sequence to always be applied.\n+\nIn some cases transforms are disabled. 
Prepending always() to a transform sequence causes the sequence to always be evaluated.\n+\nFor example:\n+\n`some_field.always().foo().bar()`:::\nAlways applies foo() and then bar().\n\n\n*basename*(undefined="")::\nReturns the last path component.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource or basename is empty.\n:::\n\n\n*collection*(undefined="")::\nReturns the current resource collection.\n+\nThe arguments are:\n+\n*```undefined```*:::\nThis value is returned if r or the collection is empty.\n:::\n\n\n*color*(red, yellow, green, blue)::\nColorizes the resource string value.\n+\nThe *red*, *yellow*, *green* and *blue* args are RE patterns, matched against the resource in order. The first pattern that matches colorizes the matched substring with that color, and the other patterns are skipped.\n+\nThe arguments are:\n+\n*```red```*:::\nThe substring pattern for the color red.\n:::\n*```yellow```*:::\nThe substring pattern for the color yellow.\n:::\n*```green```*:::\nThe substring pattern for the color green.\n:::\n*```blue```*:::\nThe substring pattern for the color blue.\n:::\n+\nFor example:\n+\n`color(red=STOP,yellow=CAUTION,green=GO)`:::\nFor the resource string "CAUTION means GO FASTER" displays the substring "CAUTION" in yellow.\n\n\n*count*()::\nCounts the number of each item in the list.\n+\nA string resource is treated as a list of characters.\n+\nFor example:\n+\n`"b/a/b/c".split("/").count()`:::\nReturns {a: 1, b: 2, c: 1}.\n\n\n*date*(format="%Y-%m-%dT%H:%M:%S", unit=1, undefined="", tz, tz_default)::\nFormats the resource as a strftime() format.\n+\nThe arguments are:\n+\n*```format```*:::\nThe strftime(3) format.\n:::\n*```unit```*:::\nIf the resource is a Timestamp then divide by _unit_ to yield seconds.\n:::\n*```undefined```*:::\nReturns this value if the resource is not a valid time.\n:::\n*```tz```*:::\nReturn the time relative to the tz timezone if specified, the explicit timezone in the resource if it 
has one, otherwise the local timezone. For example, ...date(tz=EST5EDT, tz_default=UTC).\n:::\n*```tz_default```*:::\nThe default timezone if the resource does not have a timezone suffix.\n:::\n\n\n*decode*(encoding, undefined="")::\nReturns the decoded value of the resource that was encoded by encoding.\n+\nThe arguments are:\n+\n*```encoding```*:::\nThe encoding name. *base64* and *utf-8* are supported.\n:::\n*```undefined```*:::\nReturns this value if the decoding fails.\n:::\n\n\n*duration*(start="", end="", parts=3, precision=3, calendar=true, unit=1, undefined="")::\nFormats the resource as an ISO 8601 duration string.\n+\nThe [ISO 8601 Duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) format is: "[-]P[nY][nM][nD][T[nH][nM][n[.m]S]]". The 0 duration is "P0". Otherwise at least one part will always be displayed. Negative durations are prefixed by "-". "T" disambiguates months "P2M" to the left of "T" and minutes "PT5M" to the right.\n+\nIf the resource is a datetime then the duration of `resource - current_time` is returned.\n+\nThe arguments are:\n+\n*```start```*:::\nThe name of a start time attribute in the resource. The duration of the `end - start` time attributes in resource is returned. If `end` is not specified then the current time is used.\n:::\n*```end```*:::\nThe name of an end time attribute in the resource. Defaults to the current time if omitted. Ignored if `start` is not specified.\n:::\n*```parts```*:::\nFormat at most this many duration parts starting with largest non-zero part.\n:::\n*```precision```*:::\nFormat the last duration part with precision digits after the decimal point. Trailing "0" and "." are always stripped.\n:::\n*```calendar```*:::\nAllow time units larger than hours in formated durations if true. Durations specifying hours or smaller units are exact across daylight savings time boundaries. On by default. Use calendar=false to disable. 
For example, if `calendar=true` then at the daylight savings boundary 2016-03-13T01:00:00 + P1D => 2016-03-14T01:00:00 but 2016-03-13T01:00:00 + PT24H => 2016-03-14T03:00:00. Similarly, a +P1Y duration will be inexact but "calendar correct", yielding the same month and day number next year, even in leap years.\n:::\n*```unit```*:::\nDivide the resource numeric value by _unit_ to yield seconds.\n:::\n*```undefined```*:::\nReturns this value if the resource is not a valid timestamp.\n:::\n+\nFor example:\n+\n`duration(start=createTime,end=updateTime)`:::\nThe duration from resource creation to the most recent update. \n`updateTime.duration()`:::\nThe duration since the most recent resource update.\n\n\n*encode*(encoding, undefined="")::\nReturns the encoded value of the resource using encoding.\n+\nThe arguments are:\n+\n*```encoding```*:::\nThe encoding name. *base64* and *utf-8* are supported.\n:::\n*```undefined```*:::\nReturns this value if the encoding fails.\n:::\n\n\n*enum*(enums, inverse=false, undefined="")::\nReturns the enums dictionary description for the resource.\n+\nThe arguments are:\n+\n*```enums```*:::\nThe name of a message enum dictionary.\n:::\n*```inverse```*:::\nDo inverse lookup if true.\n:::\n*```undefined```*:::\nReturns this value if there is no matching enum description.\n:::\n\n\n*error*(message)::\nRaises an Error exception that does not generate a stack trace.\n+\nThe arguments are:\n+\n*```message```*:::\nAn error message. If not specified then the resource is formatted as the error message.\n:::\n\n\n*extract*(keys)::\nExtract an ordered list of values from the resource for the specified keys.\n+\nThe arguments are:\n+\n*```keys```*:::\nThe list of keys in the resource whose associated values will be included in the result.\n:::\n\n\n*fatal*(message)::\nRaises an InternalError exception that generates a stack trace.\n+\nThe arguments are:\n+\n*```message```*:::\nAn error message. 
If not specified then the resource is formatted as the error message.\n:::\n\n\n*firstof*(keys)::\nReturns the first non-empty attribute value for key in keys.\n+\nThe arguments are:\n+\n*```keys```*:::\nKeys to check for resource attribute values,\n:::\n+\nFor example:\n+\n`x.firstof(bar_foo, barFoo, BarFoo, BAR_FOO)`:::\nChecks x.bar_foo, x.barFoo, x.BarFoo, and x.BAR_FOO in order for the first non-empty value.\n\n\n*float*(precision=6, spec, undefined="")::\nReturns the string representation of a floating point number.\n+\nOne of these formats is used (1) ". _precision_ _spec_" if _spec_ is specified (2) ". _precision_" unless 1e-04 <= abs(number) < 1e+09 (3) ".1f" otherwise.\n+\nThe arguments are:\n+\n*```precision```*:::\nThe maximum number of digits before and after the decimal point.\n:::\n*```spec```*:::\nThe printf(3) floating point format "e", "f" or "g" spec character.\n:::\n*```undefined```*:::\nReturns this value if the resource is not a float.\n:::\n\n\n*format*(fmt, args)::\nFormats resource key values.\n+\nThe arguments are:\n+\n*```fmt```*:::\nThe format string with {0} ... {nargs-1} references to the resource attribute name arg values.\n:::\n*```args```*:::\nThe resource attribute key expression to format. The printer projection symbols and aliases may be used in key expressions. If no args are specified then the resource is used as the arg list if it is a list, otherwise the resource is used as the only arg.\n:::\n+\nFor example:\n+\n`--format=\'value(format("{0:f.1}/{1:f.1}", q.CPU.default, q.CPU.limit))\'`:::\nFormats q.CPU.default and q.CPU.limit as floating point numbers.\n\n\n*group*(keys)::\nFormats a [...] grouped list.\n+\nEach group is enclosed in [...]. The first item separator is \':\', subsequent separators are \',\'. [item1] [item1] ... [item1: item2] ... [item1: item2] [item1: item2, item3] ... [item1: item2, item3]\n+\nThe arguments are:\n+\n*```keys```*:::\nOptional attribute keys to select from the list. 
Otherwise the string value of each list item is selected.\n:::\n\n\n*if*(expr)::\nDisables the projection key if the flag name filter expr is false.\n+\nThe arguments are:\n+\n*```expr```*:::\nA command flag filter name expression. See `gcloud topic filters` for details on filter expressions. The expression variables are flag names without the leading *--* prefix and dashes replaced by underscores.\n:::\n+\nFor example:\n+\n`table(name, value.if(NOT short_format))`:::\nLists a value column if the *--short-format* command line flag is not specified.\n\n\n*iso*(undefined="T")::\nFormats the resource to numeric ISO time format.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource does not have an isoformat() attribute.\n:::\n\n\n*join*(sep="/", undefined="")::\nJoins the elements of the resource list by the value of sep.\n+\nA string resource is treated as a list of characters.\n+\nThe arguments are:\n+\n*```sep```*:::\nThe separator value to use when joining.\n:::\n*```undefined```*:::\nReturns this value if the result after joining is empty.\n:::\n+\nFor example:\n+\n`"a/b/c/d".split("/").join("!")`:::\nReturns "a!b!c!d".\n\n\n*len*()::\nReturns the length of the resource if it is non-empty, 0 otherwise.\n\n*list*(show="", undefined="", separator=",")::\nFormats a dict or list into a compact comma separated list.\n+\nThe arguments are:\n+\n*```show```*:::\nIf show=*keys* then list dict keys; if show=*values* then list dict values; otherwise list dict key=value pairs.\n:::\n*```undefined```*:::\nReturn this if the resource is empty.\n:::\n*```separator```*:::\nThe list item separator string.\n:::\n\n\n*map*(depth=1)::\nApplies the next transform in the sequence to each resource list item.\n+\nThe arguments are:\n+\n*```depth```*:::\nThe list nesting depth.\n:::\n+\nFor example:\n+\n`list_field.map().foo().list()`:::\nApplies foo() to each item in list_field and then list() to the resulting value to return a compact comma-separated list. 
\n`list_field.map().foo().map().bar()`:::\nApplies foo() to each item in list_field and then bar() to each item in the resulting list. \n`abc.xyz.map(2).foo()`:::\nApplies foo() to each item in xyz[] for all items in abc[].\n\n\n*notnull*()::\nRemove null values from the resource list.\n\n*resolution*(undefined="", transpose=false)::\nFormats a human readable XY resolution.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if a recognizable resolution was not found.\n:::\n*```transpose```*:::\nReturns the y/x resolution if true.\n:::\n\n\n*scope*(args)::\nGets the /args/ suffix from a URI.\n+\nThe arguments are:\n+\n*```args```*:::\nOptional URI segment names. If not specified then \'regions\', \'zones\' is assumed.\n:::\n+\nFor example:\n+\n`"https://abc/foo/projects/bar/xyz".scope("projects")`:::\nReturns "bar/xyz". \n`"https://xyz/foo/regions/abc".scope()`:::\nReturns "abc".\n\n\n*segment*(index=-1, undefined="")::\nReturns the index-th URI path segment.\n+\nThe arguments are:\n+\n*```index```*:::\nThe path segment index to return counting from 0.\n:::\n*```undefined```*:::\nReturns this value if the resource or segment index is empty.\n:::\n\n\n*size*(zero="0", precision=1, units_in, units_out, min=0)::\nFormats a human readable size in bytes.\n+\nThe arguments are:\n+\n*```zero```*:::\nReturns this if size==0. Ignored if None.\n:::\n*```precision```*:::\nThe number of digits displayed after the decimal point.\n:::\n*```units_in```*:::\nA unit suffix (only the first character is checked) or unit size. The size is multiplied by this. The default is 1.0.\n:::\n*```units_out```*:::\nA unit suffix (only the first character is checked) or unit size. The size is divided by this. The default is 1.0.\n:::\n*```min```*:::\nSizes < _min_ will be listed as "< _min_".\n:::\n\n\n*slice*(op=":", undefined="")::\nReturns a list slice specified by op.\n+\nThe op parameter consists of up to three colon-delimeted integers: start, end, and step. 
The parameter supports half-open ranges: start and end values can be omitted, representing the first and last positions of the resource respectively.\n+\nThe step value represents the increment between items in the resource included in the slice. A step of 2 results in a slice that contains every other item in the resource.\n+\nNegative values for start and end indicate that the positons should start from the last position of the resource. A negative value for step indicates that the slice should contain items in reverse order.\n+\nIf op contains no colons, the slice consists of the single item at the specified position in the resource.\n+\nThe arguments are:\n+\n*```op```*:::\nThe slice operation.\n:::\n*```undefined```*:::\nReturns this value if the slice cannot be created, or the resulting slice is empty.\n:::\n+\nFor example:\n+\n`[1,2,3].slice(1:)`:::\nReturns [2,3]. \n`[1,2,3].slice(:2)`:::\nReturns [1,2]. \n`[1,2,3].slice(-1:)`:::\nReturns [3]. \n`[1,2,3].slice(: :-1)`:::\nReturns [3,2,1]. \n`[1,2,3].slice(1)`:::\nReturns [2].\n\n\n*sort*(attr="")::\nSorts the elements of the resource list by a given attribute (or itself).\n+\nA string resource is treated as a list of characters.\n+\nThe arguments are:\n+\n*```attr```*:::\nThe optional field of an object or dict by which to sort.\n:::\n+\nFor example:\n+\n`"b/a/d/c".split("/").sort()`:::\nReturns "[a, b, c, d]".\n\n\n*split*(sep="/", undefined="")::\nSplits a string by the value of sep.\n+\nThe arguments are:\n+\n*```sep```*:::\nThe separator value to use when splitting.\n:::\n*```undefined```*:::\nReturns this value if the result after splitting is empty.\n:::\n+\nFor example:\n+\n`"a/b/c/d".split()`:::\nReturns ["a", "b", "c", "d"].\n\n\n*synthesize*(args)::\nSynthesizes a new resource from the schema arguments.\n+\nA list of tuple arguments controls the resource synthesis. Each tuple is a schema that defines the synthesis of one resource list item. 
Each schema item defines the synthesis of one synthesized_resource attribute from an original_resource attribute.\n+\nThere are three kinds of schema items:\n+\n*name:literal*:::\nThe value for the name attribute in the synthesized resource is the literal value. \n*name=key*:::\nThe value for the name attribute in the synthesized_resource is the value of key in the original_resource. \n*key*:::\nAll the attributes of the value of key in the original_resource are added to the attributes in the synthesized_resource. \n:::\n+\nThe arguments are:\n+\n*```args```*:::\nThe list of schema tuples.\n:::\n+\nFor example:\n+\nThis returns a list of two resource items:::\n`synthesize((name:up, upInfo), (name:down, downInfo))` \nIf upInfo and downInfo serialize to:::\n`{"foo": 1, "bar": "yes"}` \nand:::\n`{"foo": 0, "bar": "no"}` \nthen the synthesized resource list is:::\n`[{"name": "up", "foo": 1, "bar": "yes"}, {"name": "down", "foo": 0, "bar": "no"}]` \nThis could then be displayed by a nested table using:::\n`synthesize(...):format="table(name, foo, bar)"`\n\n\n*uri*(undefined=".")::\nGets the resource URI.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this if a the URI for r cannot be determined.\n:::\n\n\n*yesno*(yes, no="No")::\nReturns no if the resource is empty, yes or the resource itself otherwise.\n+\nThe arguments are:\n+\n*```yes```*:::\nIf the resource is not empty then returns _yes_ or the resource itself if _yes_ is not defined.\n:::\n*```no```*:::\nReturns this value if the resource is empty.\n:::\n\n\nThe cloudbuild transform functions are:\n\n\n*build_images*(undefined="")::\nReturns the formatted build results images.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*build_source*(undefined="")::\nReturns the formatted build source.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe compute transform functions 
are:\n\n\n*firewall_rule*(undefined="")::\nReturns a compact string describing a firewall rule.\n+\nThe compact string is a comma-separated list of PROTOCOL:PORT_RANGE items. If a particular protocol has no port ranges then only the protocol is listed.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*image_alias*(undefined="")::\nReturns a comma-separated list of alias names for an image.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*location*(undefined="")::\nReturn the region or zone name.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*location_scope*(undefined="")::\nReturn the location scope name, either region or zone.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*machine_type*()::\nReturn the formatted name for a machine type.\n\n*next_maintenance*(undefined="")::\nReturns the timestamps of the next scheduled maintenance.\n+\nAll timestamps are assumed to be ISO strings in the same timezone.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*operation_http_status*(undefined="")::\nReturns the HTTP response code of an operation.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if there is no response code.\n:::\n\n\n*quota*(undefined="")::\nFormats a quota as usage/limit.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*status*(undefined="")::\nReturns the machine status with deprecation information if applicable.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe container transform functions are:\n\n\n*main_version*(undefined="")::\nReturns the formatted main 
version.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe debug transform functions are:\n\n\n*full_status*(undefined="UNKNOWN_ERROR")::\nReturns a full description of the status of a logpoint or snapshot.\n+\nStatus will be one of ACTIVE, COMPLETED, or a verbose error description. If the status is an error, there will be additional information available in the status field of the object.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource is not a valid status.\n:::\n+\nFor example:\n+\n`--format="table(id, location, full_status())"`:::\nDisplays the full status in the third table problem.\n\n\n*short_status*(undefined="UNKNOWN_ERROR")::\nReturns a short description of the status of a logpoint or snapshot.\n+\nStatus will be one of ACTIVE, COMPLETED, or a short error description. If the status is an error, there will be additional information available in the status field of the object.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource is not a valid status.\n:::\n+\nFor example:\n+\n`--format="table(id, location, short_status())"`:::\nDisplays the short status in the third table problem.\n\n\nThe functions transform functions are:\n\n\n*trigger*(undefined="")::\nReturns textual information about functions trigger.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe runtimeconfig transform functions are:\n\n\n*waiter_status*(undefined="")::\nReturns a short description of the status of a waiter or waiter operation.\n+\nStatus will be one of WAITING, SUCCESS, FAILURE, or TIMEOUT.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource status cannot be determined.\n:::\n+\nFor example:\n+\n`--format="table(name, status())"`:::\nDisplays the status in table column two.\n\n\nThe service_registry transform functions 
are:\n\n\n*endpoint_address*(undefined="")::\nReturns a compact representation of an endpoint address.\n+\nThe compact representation for a plain address (no port information) is just the address. The compact representation for an address with a port is of the form [HOST/IP]:PORT and addresses with more details or more ports will look like\n+\n address=ADDRESS[;port_number=PORT[,protocol=PROTOCOL][,port_name=name]]+\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n+\nFor example:\n+\n`--format="table(name, addresses[].map().endpoint_address())"`:::\nDisplays each address as an endpoint address.\n\n\n\n### Key Attributes\n\nKey attributes control formatted output. Each projection key may have\nzero or more attributes:\n\n _key_:_attribute_=_value_...\n\nwhere =_value_ is omitted for Boolean attributes and no-_attribute_\nsets the attribute to false. Attribute values may appear in any order,\nbut must be specified after any transform calls. The attributes are:\n\n*alias*=_ALIAS-NAME_::\nSets _ALIAS-NAME_ as an alias for the projection key.\n\n*align*=_ALIGNMENT_::\nSpecifies the output column data alignment. Used by the *table*\nformat. The alignment values are:\n\n*left*:::\nLeft (default).\n\n*center*:::\nCenter.\n\n*right*:::\nRight.\n\n*label*=_LABEL_::\nA string value used to label output. Use :label="" or :label=\'\'\nfor no label. The *table* format uses _LABEL_ values as column\nheadings. Also sets _LABEL_ as an alias for the projection key.\nThe default label is the the disambiguated right hand parts of the\ncolumn key name in ANGRY_SNAKE_CASE.\n\n[no-]*reverse*::\nSets the key sort order to descending. *no-reverse* resets to the\ndefault ascending order.\n\n*sort*=_SORT-ORDER_::\nAn integer counting from 1. Keys with lower sort-order are sorted\nfirst. 
Keys with same sort order are sorted left to right.\n\n*wrap*::\nEnables the column text to be wrapped if the table would otherwise\nbe too wide for the display.\n', + u'sections': {u'DESCRIPTION': u'Most *gcloud* commands return a list of resources on success. By default they\nare pretty-printed on the standard output. The\n*--format=*_NAME_[_ATTRIBUTES_]*(*_PROJECTION_*)* flag changes the default\noutput:\n\n_NAME_:: The format name.\n\n_ATTRIBUTES_:: Format specific attributes. For details run $ gcloud topic formats.\n\n_PROJECTION_:: A list of resource keys that selects the data listed. Resource projections are described in detail below.\n\n_resource keys_:: Keys are names for resource resource items. For details run $ gcloud topic resource-keys.\n\nMost *gcloud* *list* commands have a *--filter=*_EXPRESSION_ flag that\nselects resources to be listed. For details run $ gcloud topic filters.\n\n\n### Projections\n\nA projection is a list of keys that selects resource data values.\nProjections are used in *--format* flag expressions. For example, the\n*table* format requires a projection that describes the table columns:\n\n table(name, network.ip.internal, network.ip.external, uri())\n\n### Transforms\n\nA *transform* formats resource data values. Each projection key may\nhave zero or more transform calls:\n\n _key_._transform_([arg...])...\n\nThis example applies the *foo*() and then the *bar*() transform to the\n*status.time* resource value:\n\n (name, status.time.foo().bar())\n\n\nThe builtin transform functions are:\n\n\n*always*()::\nMarks a transform sequence to always be applied.\n+\nIn some cases transforms are disabled. 
Prepending always() to a transform sequence causes the sequence to always be evaluated.\n+\nFor example:\n+\n`some_field.always().foo().bar()`:::\nAlways applies foo() and then bar().\n\n\n*basename*(undefined="")::\nReturns the last path component.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource or basename is empty.\n:::\n\n\n*collection*(undefined="")::\nReturns the current resource collection.\n+\nThe arguments are:\n+\n*```undefined```*:::\nThis value is returned if r or the collection is empty.\n:::\n\n\n*color*(red, yellow, green, blue)::\nColorizes the resource string value.\n+\nThe *red*, *yellow*, *green* and *blue* args are RE patterns, matched against the resource in order. The first pattern that matches colorizes the matched substring with that color, and the other patterns are skipped.\n+\nThe arguments are:\n+\n*```red```*:::\nThe substring pattern for the color red.\n:::\n*```yellow```*:::\nThe substring pattern for the color yellow.\n:::\n*```green```*:::\nThe substring pattern for the color green.\n:::\n*```blue```*:::\nThe substring pattern for the color blue.\n:::\n+\nFor example:\n+\n`color(red=STOP,yellow=CAUTION,green=GO)`:::\nFor the resource string "CAUTION means GO FASTER" displays the substring "CAUTION" in yellow.\n\n\n*count*()::\nCounts the number of each item in the list.\n+\nA string resource is treated as a list of characters.\n+\nFor example:\n+\n`"b/a/b/c".split("/").count()`:::\nReturns {a: 1, b: 2, c: 1}.\n\n\n*date*(format="%Y-%m-%dT%H:%M:%S", unit=1, undefined="", tz, tz_default)::\nFormats the resource as a strftime() format.\n+\nThe arguments are:\n+\n*```format```*:::\nThe strftime(3) format.\n:::\n*```unit```*:::\nIf the resource is a Timestamp then divide by _unit_ to yield seconds.\n:::\n*```undefined```*:::\nReturns this value if the resource is not a valid time.\n:::\n*```tz```*:::\nReturn the time relative to the tz timezone if specified, the explicit timezone in the resource if it 
has one, otherwise the local timezone. For example, ...date(tz=EST5EDT, tz_default=UTC).\n:::\n*```tz_default```*:::\nThe default timezone if the resource does not have a timezone suffix.\n:::\n\n\n*decode*(encoding, undefined="")::\nReturns the decoded value of the resource that was encoded by encoding.\n+\nThe arguments are:\n+\n*```encoding```*:::\nThe encoding name. *base64* and *utf-8* are supported.\n:::\n*```undefined```*:::\nReturns this value if the decoding fails.\n:::\n\n\n*duration*(start="", end="", parts=3, precision=3, calendar=true, unit=1, undefined="")::\nFormats the resource as an ISO 8601 duration string.\n+\nThe [ISO 8601 Duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) format is: "[-]P[nY][nM][nD][T[nH][nM][n[.m]S]]". The 0 duration is "P0". Otherwise at least one part will always be displayed. Negative durations are prefixed by "-". "T" disambiguates months "P2M" to the left of "T" and minutes "PT5M" to the right.\n+\nIf the resource is a datetime then the duration of `resource - current_time` is returned.\n+\nThe arguments are:\n+\n*```start```*:::\nThe name of a start time attribute in the resource. The duration of the `end - start` time attributes in resource is returned. If `end` is not specified then the current time is used.\n:::\n*```end```*:::\nThe name of an end time attribute in the resource. Defaults to the current time if omitted. Ignored if `start` is not specified.\n:::\n*```parts```*:::\nFormat at most this many duration parts starting with largest non-zero part.\n:::\n*```precision```*:::\nFormat the last duration part with precision digits after the decimal point. Trailing "0" and "." are always stripped.\n:::\n*```calendar```*:::\nAllow time units larger than hours in formated durations if true. Durations specifying hours or smaller units are exact across daylight savings time boundaries. On by default. Use calendar=false to disable. 
For example, if `calendar=true` then at the daylight savings boundary 2016-03-13T01:00:00 + P1D => 2016-03-14T01:00:00 but 2016-03-13T01:00:00 + PT24H => 2016-03-14T03:00:00. Similarly, a +P1Y duration will be inexact but "calendar correct", yielding the same month and day number next year, even in leap years.\n:::\n*```unit```*:::\nDivide the resource numeric value by _unit_ to yield seconds.\n:::\n*```undefined```*:::\nReturns this value if the resource is not a valid timestamp.\n:::\n+\nFor example:\n+\n`duration(start=createTime,end=updateTime)`:::\nThe duration from resource creation to the most recent update. \n`updateTime.duration()`:::\nThe duration since the most recent resource update.\n\n\n*encode*(encoding, undefined="")::\nReturns the encoded value of the resource using encoding.\n+\nThe arguments are:\n+\n*```encoding```*:::\nThe encoding name. *base64* and *utf-8* are supported.\n:::\n*```undefined```*:::\nReturns this value if the encoding fails.\n:::\n\n\n*enum*(enums, inverse=false, undefined="")::\nReturns the enums dictionary description for the resource.\n+\nThe arguments are:\n+\n*```enums```*:::\nThe name of a message enum dictionary.\n:::\n*```inverse```*:::\nDo inverse lookup if true.\n:::\n*```undefined```*:::\nReturns this value if there is no matching enum description.\n:::\n\n\n*error*(message)::\nRaises an Error exception that does not generate a stack trace.\n+\nThe arguments are:\n+\n*```message```*:::\nAn error message. If not specified then the resource is formatted as the error message.\n:::\n\n\n*extract*(keys)::\nExtract an ordered list of values from the resource for the specified keys.\n+\nThe arguments are:\n+\n*```keys```*:::\nThe list of keys in the resource whose associated values will be included in the result.\n:::\n\n\n*fatal*(message)::\nRaises an InternalError exception that generates a stack trace.\n+\nThe arguments are:\n+\n*```message```*:::\nAn error message. 
If not specified then the resource is formatted as the error message.\n:::\n\n\n*firstof*(keys)::\nReturns the first non-empty attribute value for key in keys.\n+\nThe arguments are:\n+\n*```keys```*:::\nKeys to check for resource attribute values,\n:::\n+\nFor example:\n+\n`x.firstof(bar_foo, barFoo, BarFoo, BAR_FOO)`:::\nChecks x.bar_foo, x.barFoo, x.BarFoo, and x.BAR_FOO in order for the first non-empty value.\n\n\n*float*(precision=6, spec, undefined="")::\nReturns the string representation of a floating point number.\n+\nOne of these formats is used (1) ". _precision_ _spec_" if _spec_ is specified (2) ". _precision_" unless 1e-04 <= abs(number) < 1e+09 (3) ".1f" otherwise.\n+\nThe arguments are:\n+\n*```precision```*:::\nThe maximum number of digits before and after the decimal point.\n:::\n*```spec```*:::\nThe printf(3) floating point format "e", "f" or "g" spec character.\n:::\n*```undefined```*:::\nReturns this value if the resource is not a float.\n:::\n\n\n*format*(fmt, args)::\nFormats resource key values.\n+\nThe arguments are:\n+\n*```fmt```*:::\nThe format string with {0} ... {nargs-1} references to the resource attribute name arg values.\n:::\n*```args```*:::\nThe resource attribute key expression to format. The printer projection symbols and aliases may be used in key expressions. If no args are specified then the resource is used as the arg list if it is a list, otherwise the resource is used as the only arg.\n:::\n+\nFor example:\n+\n`--format=\'value(format("{0:f.1}/{1:f.1}", q.CPU.default, q.CPU.limit))\'`:::\nFormats q.CPU.default and q.CPU.limit as floating point numbers.\n\n\n*group*(keys)::\nFormats a [...] grouped list.\n+\nEach group is enclosed in [...]. The first item separator is \':\', subsequent separators are \',\'. [item1] [item1] ... [item1: item2] ... [item1: item2] [item1: item2, item3] ... [item1: item2, item3]\n+\nThe arguments are:\n+\n*```keys```*:::\nOptional attribute keys to select from the list. 
Otherwise the string value of each list item is selected.\n:::\n\n\n*if*(expr)::\nDisables the projection key if the flag name filter expr is false.\n+\nThe arguments are:\n+\n*```expr```*:::\nA command flag filter name expression. See `gcloud topic filters` for details on filter expressions. The expression variables are flag names without the leading *--* prefix and dashes replaced by underscores.\n:::\n+\nFor example:\n+\n`table(name, value.if(NOT short_format))`:::\nLists a value column if the *--short-format* command line flag is not specified.\n\n\n*iso*(undefined="T")::\nFormats the resource to numeric ISO time format.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource does not have an isoformat() attribute.\n:::\n\n\n*join*(sep="/", undefined="")::\nJoins the elements of the resource list by the value of sep.\n+\nA string resource is treated as a list of characters.\n+\nThe arguments are:\n+\n*```sep```*:::\nThe separator value to use when joining.\n:::\n*```undefined```*:::\nReturns this value if the result after joining is empty.\n:::\n+\nFor example:\n+\n`"a/b/c/d".split("/").join("!")`:::\nReturns "a!b!c!d".\n\n\n*len*()::\nReturns the length of the resource if it is non-empty, 0 otherwise.\n\n*list*(show="", undefined="", separator=",")::\nFormats a dict or list into a compact comma separated list.\n+\nThe arguments are:\n+\n*```show```*:::\nIf show=*keys* then list dict keys; if show=*values* then list dict values; otherwise list dict key=value pairs.\n:::\n*```undefined```*:::\nReturn this if the resource is empty.\n:::\n*```separator```*:::\nThe list item separator string.\n:::\n\n\n*map*(depth=1)::\nApplies the next transform in the sequence to each resource list item.\n+\nThe arguments are:\n+\n*```depth```*:::\nThe list nesting depth.\n:::\n+\nFor example:\n+\n`list_field.map().foo().list()`:::\nApplies foo() to each item in list_field and then list() to the resulting value to return a compact comma-separated list. 
\n`list_field.map().foo().map().bar()`:::\nApplies foo() to each item in list_field and then bar() to each item in the resulting list. \n`abc.xyz.map(2).foo()`:::\nApplies foo() to each item in xyz[] for all items in abc[].\n\n\n*notnull*()::\nRemove null values from the resource list.\n\n*resolution*(undefined="", transpose=false)::\nFormats a human readable XY resolution.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if a recognizable resolution was not found.\n:::\n*```transpose```*:::\nReturns the y/x resolution if true.\n:::\n\n\n*scope*(args)::\nGets the /args/ suffix from a URI.\n+\nThe arguments are:\n+\n*```args```*:::\nOptional URI segment names. If not specified then \'regions\', \'zones\' is assumed.\n:::\n+\nFor example:\n+\n`"https://abc/foo/projects/bar/xyz".scope("projects")`:::\nReturns "bar/xyz". \n`"https://xyz/foo/regions/abc".scope()`:::\nReturns "abc".\n\n\n*segment*(index=-1, undefined="")::\nReturns the index-th URI path segment.\n+\nThe arguments are:\n+\n*```index```*:::\nThe path segment index to return counting from 0.\n:::\n*```undefined```*:::\nReturns this value if the resource or segment index is empty.\n:::\n\n\n*size*(zero="0", precision=1, units_in, units_out, min=0)::\nFormats a human readable size in bytes.\n+\nThe arguments are:\n+\n*```zero```*:::\nReturns this if size==0. Ignored if None.\n:::\n*```precision```*:::\nThe number of digits displayed after the decimal point.\n:::\n*```units_in```*:::\nA unit suffix (only the first character is checked) or unit size. The size is multiplied by this. The default is 1.0.\n:::\n*```units_out```*:::\nA unit suffix (only the first character is checked) or unit size. The size is divided by this. The default is 1.0.\n:::\n*```min```*:::\nSizes < _min_ will be listed as "< _min_".\n:::\n\n\n*slice*(op=":", undefined="")::\nReturns a list slice specified by op.\n+\nThe op parameter consists of up to three colon-delimeted integers: start, end, and step. 
The parameter supports half-open ranges: start and end values can be omitted, representing the first and last positions of the resource respectively.\n+\nThe step value represents the increment between items in the resource included in the slice. A step of 2 results in a slice that contains every other item in the resource.\n+\nNegative values for start and end indicate that the positons should start from the last position of the resource. A negative value for step indicates that the slice should contain items in reverse order.\n+\nIf op contains no colons, the slice consists of the single item at the specified position in the resource.\n+\nThe arguments are:\n+\n*```op```*:::\nThe slice operation.\n:::\n*```undefined```*:::\nReturns this value if the slice cannot be created, or the resulting slice is empty.\n:::\n+\nFor example:\n+\n`[1,2,3].slice(1:)`:::\nReturns [2,3]. \n`[1,2,3].slice(:2)`:::\nReturns [1,2]. \n`[1,2,3].slice(-1:)`:::\nReturns [3]. \n`[1,2,3].slice(: :-1)`:::\nReturns [3,2,1]. \n`[1,2,3].slice(1)`:::\nReturns [2].\n\n\n*sort*(attr="")::\nSorts the elements of the resource list by a given attribute (or itself).\n+\nA string resource is treated as a list of characters.\n+\nThe arguments are:\n+\n*```attr```*:::\nThe optional field of an object or dict by which to sort.\n:::\n+\nFor example:\n+\n`"b/a/d/c".split("/").sort()`:::\nReturns "[a, b, c, d]".\n\n\n*split*(sep="/", undefined="")::\nSplits a string by the value of sep.\n+\nThe arguments are:\n+\n*```sep```*:::\nThe separator value to use when splitting.\n:::\n*```undefined```*:::\nReturns this value if the result after splitting is empty.\n:::\n+\nFor example:\n+\n`"a/b/c/d".split()`:::\nReturns ["a", "b", "c", "d"].\n\n\n*synthesize*(args)::\nSynthesizes a new resource from the schema arguments.\n+\nA list of tuple arguments controls the resource synthesis. Each tuple is a schema that defines the synthesis of one resource list item. 
Each schema item defines the synthesis of one synthesized_resource attribute from an original_resource attribute.\n+\nThere are three kinds of schema items:\n+\n*name:literal*:::\nThe value for the name attribute in the synthesized resource is the literal value. \n*name=key*:::\nThe value for the name attribute in the synthesized_resource is the value of key in the original_resource. \n*key*:::\nAll the attributes of the value of key in the original_resource are added to the attributes in the synthesized_resource. \n:::\n+\nThe arguments are:\n+\n*```args```*:::\nThe list of schema tuples.\n:::\n+\nFor example:\n+\nThis returns a list of two resource items:::\n`synthesize((name:up, upInfo), (name:down, downInfo))` \nIf upInfo and downInfo serialize to:::\n`{"foo": 1, "bar": "yes"}` \nand:::\n`{"foo": 0, "bar": "no"}` \nthen the synthesized resource list is:::\n`[{"name": "up", "foo": 1, "bar": "yes"}, {"name": "down", "foo": 0, "bar": "no"}]` \nThis could then be displayed by a nested table using:::\n`synthesize(...):format="table(name, foo, bar)"`\n\n\n*uri*(undefined=".")::\nGets the resource URI.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this if a the URI for r cannot be determined.\n:::\n\n\n*yesno*(yes, no="No")::\nReturns no if the resource is empty, yes or the resource itself otherwise.\n+\nThe arguments are:\n+\n*```yes```*:::\nIf the resource is not empty then returns _yes_ or the resource itself if _yes_ is not defined.\n:::\n*```no```*:::\nReturns this value if the resource is empty.\n:::\n\n\nThe cloudbuild transform functions are:\n\n\n*build_images*(undefined="")::\nReturns the formatted build results images.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*build_source*(undefined="")::\nReturns the formatted build source.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe compute transform functions 
are:\n\n\n*firewall_rule*(undefined="")::\nReturns a compact string describing a firewall rule.\n+\nThe compact string is a comma-separated list of PROTOCOL:PORT_RANGE items. If a particular protocol has no port ranges then only the protocol is listed.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*image_alias*(undefined="")::\nReturns a comma-separated list of alias names for an image.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*location*(undefined="")::\nReturn the region or zone name.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*location_scope*(undefined="")::\nReturn the location scope name, either region or zone.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*machine_type*()::\nReturn the formatted name for a machine type.\n\n*next_maintenance*(undefined="")::\nReturns the timestamps of the next scheduled maintenance.\n+\nAll timestamps are assumed to be ISO strings in the same timezone.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*operation_http_status*(undefined="")::\nReturns the HTTP response code of an operation.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if there is no response code.\n:::\n\n\n*quota*(undefined="")::\nFormats a quota as usage/limit.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\n*status*(undefined="")::\nReturns the machine status with deprecation information if applicable.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe container transform functions are:\n\n\n*main_version*(undefined="")::\nReturns the formatted main 
version.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe debug transform functions are:\n\n\n*full_status*(undefined="UNKNOWN_ERROR")::\nReturns a full description of the status of a logpoint or snapshot.\n+\nStatus will be one of ACTIVE, COMPLETED, or a verbose error description. If the status is an error, there will be additional information available in the status field of the object.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource is not a valid status.\n:::\n+\nFor example:\n+\n`--format="table(id, location, full_status())"`:::\nDisplays the full status in the third table problem.\n\n\n*short_status*(undefined="UNKNOWN_ERROR")::\nReturns a short description of the status of a logpoint or snapshot.\n+\nStatus will be one of ACTIVE, COMPLETED, or a short error description. If the status is an error, there will be additional information available in the status field of the object.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource is not a valid status.\n:::\n+\nFor example:\n+\n`--format="table(id, location, short_status())"`:::\nDisplays the short status in the third table problem.\n\n\nThe functions transform functions are:\n\n\n*trigger*(undefined="")::\nReturns textual information about functions trigger.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n\n\nThe runtimeconfig transform functions are:\n\n\n*waiter_status*(undefined="")::\nReturns a short description of the status of a waiter or waiter operation.\n+\nStatus will be one of WAITING, SUCCESS, FAILURE, or TIMEOUT.\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource status cannot be determined.\n:::\n+\nFor example:\n+\n`--format="table(name, status())"`:::\nDisplays the status in table column two.\n\n\nThe service_registry transform functions 
are:\n\n\n*endpoint_address*(undefined="")::\nReturns a compact representation of an endpoint address.\n+\nThe compact representation for a plain address (no port information) is just the address. The compact representation for an address with a port is of the form [HOST/IP]:PORT and addresses with more details or more ports will look like\n+\n address=ADDRESS[;port_number=PORT[,protocol=PROTOCOL][,port_name=name]]+\n+\nThe arguments are:\n+\n*```undefined```*:::\nReturns this value if the resource cannot be formatted.\n:::\n+\nFor example:\n+\n`--format="table(name, addresses[].map().endpoint_address())"`:::\nDisplays each address as an endpoint address.\n\n\n\n### Key Attributes\n\nKey attributes control formatted output. Each projection key may have\nzero or more attributes:\n\n _key_:_attribute_=_value_...\n\nwhere =_value_ is omitted for Boolean attributes and no-_attribute_\nsets the attribute to false. Attribute values may appear in any order,\nbut must be specified after any transform calls. The attributes are:\n\n*alias*=_ALIAS-NAME_::\nSets _ALIAS-NAME_ as an alias for the projection key.\n\n*align*=_ALIGNMENT_::\nSpecifies the output column data alignment. Used by the *table*\nformat. The alignment values are:\n\n*left*:::\nLeft (default).\n\n*center*:::\nCenter.\n\n*right*:::\nRight.\n\n*label*=_LABEL_::\nA string value used to label output. Use :label="" or :label=\'\'\nfor no label. The *table* format uses _LABEL_ values as column\nheadings. Also sets _LABEL_ as an alias for the projection key.\nThe default label is the the disambiguated right hand parts of the\ncolumn key name in ANGRY_SNAKE_CASE.\n\n[no-]*reverse*::\nSets the key sort order to descending. *no-reverse* resets to the\ndefault ascending order.\n\n*sort*=_SORT-ORDER_::\nAn integer counting from 1. Keys with lower sort-order are sorted\nfirst. 
Keys with same sort order are sorted left to right.\n\n*wrap*::\nEnables the column text to be wrapped if the table would otherwise\nbe too wide for the display.\n', u'EXAMPLES': u'List a table of instance *zone* (sorted in descending order) and\n*name* (sorted by *name* and centered with column heading *INSTANCE*)\nand *creationTimestamp* (listed using the *strftime*(3) year-month-day\nformat with column heading *START*):\n\n $ gcloud compute instances list --format=\'table(name:sort=2:align=center:label=INSTANCE, zone:sort=1:reverse, creationTimestamp.date("%Y-%m-%d"):label=START)\'\n\nList only the *name*, *status* and *zone* instance resource keys in\nYAML format:\n\n $ gcloud compute instances list --format=\'yaml(name, status, zone)\'\n\nList only the *config.account* key value(s) in the *info* resource:\n\n $ gcloud info --format=\'value(config.account)\'\n'}}, u'resource-keys': {u'capsule': u'Resource keys supplementary help.', u'commands': {}, diff --git a/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_registry.py b/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_registry.py index 6ee3f69..a30afd9 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_registry.py +++ b/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_registry.py @@ -824,7 +824,7 @@ table( name, zone, - master_version():label=MASTER_VERSION, + main_version():label=MASTER_VERSION, endpoint:label=MASTER_IP, nodePools[0].config.machineType, currentNodeVersion:label=NODE_VERSION, diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_client.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_client.py index 532e2ab..22831b9 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_client.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_client.py @@ -34,33 +34,33 @@ def __init__(self, url='', 
credentials=None, credentials_args=credentials_args, default_global_params=default_global_params, additional_http_headers=additional_http_headers) - self.masterProjects_zones_signedUrls = self.MasterProjectsZonesSignedUrlsService(self) - self.masterProjects_zones_tokens = self.MasterProjectsZonesTokensService(self) - self.masterProjects_zones = self.MasterProjectsZonesService(self) - self.masterProjects = self.MasterProjectsService(self) + self.mainProjects_zones_signedUrls = self.MainProjectsZonesSignedUrlsService(self) + self.mainProjects_zones_tokens = self.MainProjectsZonesTokensService(self) + self.mainProjects_zones = self.MainProjectsZonesService(self) + self.mainProjects = self.MainProjectsService(self) self.projects_zones_clusters_nodePools = self.ProjectsZonesClustersNodePoolsService(self) self.projects_zones_clusters = self.ProjectsZonesClustersService(self) self.projects_zones_operations = self.ProjectsZonesOperationsService(self) self.projects_zones = self.ProjectsZonesService(self) self.projects = self.ProjectsService(self) - class MasterProjectsZonesSignedUrlsService(base_api.BaseApiService): - """Service class for the masterProjects_zones_signedUrls resource.""" + class MainProjectsZonesSignedUrlsService(base_api.BaseApiService): + """Service class for the mainProjects_zones_signedUrls resource.""" - _NAME = u'masterProjects_zones_signedUrls' + _NAME = u'mainProjects_zones_signedUrls' def __init__(self, client): - super(ContainerV1.MasterProjectsZonesSignedUrlsService, self).__init__(client) + super(ContainerV1.MainProjectsZonesSignedUrlsService, self).__init__(client) self._upload_configs = { } def Create(self, request, global_params=None): """Creates signed URLs that allow for writing a file to a private GCS bucket. -for storing backups of hosted master data. Signed URLs are explained here: +for storing backups of hosted main data. 
Signed URLs are explained here: https://cloud.google.com/storage/docs/access-control#Signed-URLs Args: - request: (ContainerMasterProjectsZonesSignedUrlsCreateRequest) input message + request: (ContainerMainProjectsZonesSignedUrlsCreateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (SignedUrls) The response message. @@ -71,34 +71,34 @@ def Create(self, request, global_params=None): Create.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', - method_id=u'container.masterProjects.zones.signedUrls.create', - ordered_params=[u'masterProjectId', u'zone'], - path_params=[u'masterProjectId', u'zone'], + method_id=u'container.mainProjects.zones.signedUrls.create', + ordered_params=[u'mainProjectId', u'zone'], + path_params=[u'mainProjectId', u'zone'], query_params=[], - relative_path=u'v1/masterProjects/{masterProjectId}/zones/{zone}/signedUrls', + relative_path=u'v1/mainProjects/{mainProjectId}/zones/{zone}/signedUrls', request_field=u'createSignedUrlsRequest', - request_type_name=u'ContainerMasterProjectsZonesSignedUrlsCreateRequest', + request_type_name=u'ContainerMainProjectsZonesSignedUrlsCreateRequest', response_type_name=u'SignedUrls', supports_download=False, ) - class MasterProjectsZonesTokensService(base_api.BaseApiService): - """Service class for the masterProjects_zones_tokens resource.""" + class MainProjectsZonesTokensService(base_api.BaseApiService): + """Service class for the mainProjects_zones_tokens resource.""" - _NAME = u'masterProjects_zones_tokens' + _NAME = u'mainProjects_zones_tokens' def __init__(self, client): - super(ContainerV1.MasterProjectsZonesTokensService, self).__init__(client) + super(ContainerV1.MainProjectsZonesTokensService, self).__init__(client) self._upload_configs = { } def Create(self, request, global_params=None): """Creates a compute-read-write (https://www.googleapis.com/auth/compute). 
-scoped OAuth2 access token for , to allow a hosted master +scoped OAuth2 access token for , to allow a hosted main to make modifications to its user's project. Args: - request: (ContainerMasterProjectsZonesTokensCreateRequest) input message + request: (ContainerMainProjectsZonesTokensCreateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Token) The response message. @@ -109,24 +109,24 @@ def Create(self, request, global_params=None): Create.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', - method_id=u'container.masterProjects.zones.tokens.create', - ordered_params=[u'masterProjectId', u'zone'], - path_params=[u'masterProjectId', u'zone'], + method_id=u'container.mainProjects.zones.tokens.create', + ordered_params=[u'mainProjectId', u'zone'], + path_params=[u'mainProjectId', u'zone'], query_params=[], - relative_path=u'v1/masterProjects/{masterProjectId}/zones/{zone}/tokens', + relative_path=u'v1/mainProjects/{mainProjectId}/zones/{zone}/tokens', request_field=u'createTokenRequest', - request_type_name=u'ContainerMasterProjectsZonesTokensCreateRequest', + request_type_name=u'ContainerMainProjectsZonesTokensCreateRequest', response_type_name=u'Token', supports_download=False, ) - class MasterProjectsZonesService(base_api.BaseApiService): - """Service class for the masterProjects_zones resource.""" + class MainProjectsZonesService(base_api.BaseApiService): + """Service class for the mainProjects_zones resource.""" - _NAME = u'masterProjects_zones' + _NAME = u'mainProjects_zones' def __init__(self, client): - super(ContainerV1.MasterProjectsZonesService, self).__init__(client) + super(ContainerV1.MainProjectsZonesService, self).__init__(client) self._upload_configs = { } @@ -140,7 +140,7 @@ def Authenticate(self, request, global_params=None): https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/authentication.k8s.io/v1beta1/types.go. 
Args: - request: (ContainerMasterProjectsZonesAuthenticateRequest) input message + request: (ContainerMainProjectsZonesAuthenticateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (AuthenticateResponse) The response message. @@ -151,13 +151,13 @@ def Authenticate(self, request, global_params=None): Authenticate.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', - method_id=u'container.masterProjects.zones.authenticate', - ordered_params=[u'masterProjectId', u'zone', u'projectNumber', u'clusterId'], - path_params=[u'clusterId', u'masterProjectId', u'projectNumber', u'zone'], + method_id=u'container.mainProjects.zones.authenticate', + ordered_params=[u'mainProjectId', u'zone', u'projectNumber', u'clusterId'], + path_params=[u'clusterId', u'mainProjectId', u'projectNumber', u'zone'], query_params=[], - relative_path=u'v1/masterProjects/{masterProjectId}/zones/{zone}/{projectNumber}/{clusterId}/authenticate', + relative_path=u'v1/mainProjects/{mainProjectId}/zones/{zone}/{projectNumber}/{clusterId}/authenticate', request_field=u'authenticateRequest', - request_type_name=u'ContainerMasterProjectsZonesAuthenticateRequest', + request_type_name=u'ContainerMainProjectsZonesAuthenticateRequest', response_type_name=u'AuthenticateResponse', supports_download=False, ) @@ -172,7 +172,7 @@ def Authorize(self, request, global_params=None): https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/authorization/v1beta1/types.go. Args: - request: (ContainerMasterProjectsZonesAuthorizeRequest) input message + request: (ContainerMainProjectsZonesAuthorizeRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (AuthorizeResponse) The response message. 
@@ -183,13 +183,13 @@ def Authorize(self, request, global_params=None): Authorize.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', - method_id=u'container.masterProjects.zones.authorize', - ordered_params=[u'masterProjectId', u'zone', u'projectNumber', u'clusterId'], - path_params=[u'clusterId', u'masterProjectId', u'projectNumber', u'zone'], + method_id=u'container.mainProjects.zones.authorize', + ordered_params=[u'mainProjectId', u'zone', u'projectNumber', u'clusterId'], + path_params=[u'clusterId', u'mainProjectId', u'projectNumber', u'zone'], query_params=[], - relative_path=u'v1/masterProjects/{masterProjectId}/zones/{zone}/{projectNumber}/{clusterId}/authorize', + relative_path=u'v1/mainProjects/{mainProjectId}/zones/{zone}/{projectNumber}/{clusterId}/authorize', request_field=u'authorizeRequest', - request_type_name=u'ContainerMasterProjectsZonesAuthorizeRequest', + request_type_name=u'ContainerMainProjectsZonesAuthorizeRequest', response_type_name=u'AuthorizeResponse', supports_download=False, ) @@ -203,7 +203,7 @@ def Imagereview(self, request, global_params=None): https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/imagepolicy/v1beta1/types.go. Args: - request: (ContainerMasterProjectsZonesImagereviewRequest) input message + request: (ContainerMainProjectsZonesImagereviewRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ImageReviewResponse) The response message. 
@@ -214,13 +214,13 @@ def Imagereview(self, request, global_params=None): Imagereview.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', - method_id=u'container.masterProjects.zones.imagereview', - ordered_params=[u'masterProjectId', u'zone', u'projectNumber', u'clusterId'], - path_params=[u'clusterId', u'masterProjectId', u'projectNumber', u'zone'], + method_id=u'container.mainProjects.zones.imagereview', + ordered_params=[u'mainProjectId', u'zone', u'projectNumber', u'clusterId'], + path_params=[u'clusterId', u'mainProjectId', u'projectNumber', u'zone'], query_params=[], - relative_path=u'v1/masterProjects/{masterProjectId}/zones/{zone}/{projectNumber}/{clusterId}/imagereview', + relative_path=u'v1/mainProjects/{mainProjectId}/zones/{zone}/{projectNumber}/{clusterId}/imagereview', request_field=u'imageReviewRequest', - request_type_name=u'ContainerMasterProjectsZonesImagereviewRequest', + request_type_name=u'ContainerMainProjectsZonesImagereviewRequest', response_type_name=u'ImageReviewResponse', supports_download=False, ) @@ -241,24 +241,24 @@ def Signcertificate(self, request, global_params=None): Signcertificate.method_config = lambda: base_api.ApiMethodInfo( http_method=u'POST', - method_id=u'container.masterProjects.zones.signcertificate', - ordered_params=[u'masterProjectId', u'zone', u'projectNumber', u'clusterId'], - path_params=[u'clusterId', u'masterProjectId', u'projectNumber', u'zone'], + method_id=u'container.mainProjects.zones.signcertificate', + ordered_params=[u'mainProjectId', u'zone', u'projectNumber', u'clusterId'], + path_params=[u'clusterId', u'mainProjectId', u'projectNumber', u'zone'], query_params=[], - relative_path=u'v1/masterProjects/{masterProjectId}/zones/{zone}/{projectNumber}/{clusterId}/signcertificate', + relative_path=u'v1/mainProjects/{mainProjectId}/zones/{zone}/{projectNumber}/{clusterId}/signcertificate', request_field='', request_type_name=u'CertificateSigningRequest', 
response_type_name=u'CertificateSigningRequest', supports_download=False, ) - class MasterProjectsService(base_api.BaseApiService): - """Service class for the masterProjects resource.""" + class MainProjectsService(base_api.BaseApiService): + """Service class for the mainProjects resource.""" - _NAME = u'masterProjects' + _NAME = u'mainProjects' def __init__(self, client): - super(ContainerV1.MasterProjectsService, self).__init__(client) + super(ContainerV1.MainProjectsService, self).__init__(client) self._upload_configs = { } diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_messages.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_messages.py index df5fffc..a91bed0 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_messages.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1/container_v1_messages.py @@ -33,7 +33,7 @@ class AddonsConfig(_messages.Message): class AuthenticateRequest(_messages.Message): """A request to authenticate a user based on a provided OAuth2 token. This should look very close to the TokenReview struct in http://github.com/kubern - etes/kubernetes/blob/master/pkg/apis/authentication.k8s.io/v1beta1/types.go. + etes/kubernetes/blob/main/pkg/apis/authentication.k8s.io/v1beta1/types.go. This message has 4 GKE-specific fields that get mapped from the path, but the other fields (the expected JSON payload) must match TokenReview. @@ -60,7 +60,7 @@ class AuthenticateRequest(_messages.Message): class AuthenticateResponse(_messages.Message): """A response with the authenticated identity. This should match exactly with the TokenReview struct from http://github.com/kubernetes/kubernetes/blo - b/master/pkg/apis/authentication.k8s.io/types.go. + b/main/pkg/apis/authentication.k8s.io/types.go. Fields: apiVersion: The api version of the TokenReview object. 
@@ -110,7 +110,7 @@ class AuthorizeRequest(_messages.Message): class AuthorizeResponse(_messages.Message): """A response to a request for authorization. This should match exactly with the SubjectAccessReview struct from http://github.com/kubernetes/kubernetes/ - blob/master/pkg/apis/v1beta1/authorization/types.go. + blob/main/pkg/apis/v1beta1/authorization/types.go. Fields: apiVersion: The api version of the SubjectAccessReview object. @@ -158,27 +158,27 @@ class CertificateSigningRequest(_messages.Message): Fields: apiVersion: The api version of the CertificateSigningRequest object. - clusterId: The name of this master's cluster. + clusterId: The name of this main's cluster. kind: The "kind" of the CertificateSigningRequest object. - masterProjectId: The hosted master project in which this master resides. + mainProjectId: The hosted main project in which this main resides. This can be either a [project ID or project number](https://support.google.com/cloud/answer/6158840). metadata: Additional metadata about the Kubernetes object. projectNumber: The project number for which the certificate is being - signed. This is the project in which this master's cluster resides. + signed. This is the project in which this main's cluster resides. This is an int64, so it must be a project number, not a project ID. spec: The specification holds information about the certificate requesting to be signed. status: The status is populated at response time, and holds information about the success or failure of the operation along with the signed certificate. - zone: The zone of this master's cluster. + zone: The zone of this main's cluster. 
""" apiVersion = _messages.StringField(1) clusterId = _messages.StringField(2) kind = _messages.StringField(3) - masterProjectId = _messages.StringField(4) + mainProjectId = _messages.StringField(4) metadata = _messages.MessageField('ObjectMeta', 5) projectNumber = _messages.IntegerField(6) spec = _messages.MessageField('CertificateSigningRequestSpec', 7) @@ -253,8 +253,8 @@ class Cluster(_messages.Message): automatically chosen or specify a `/14` block in `10.0.0.0/8`. createTime: [Output only] The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. - currentMasterVersion: [Output only] The current software version of the - master endpoint. + currentMainVersion: [Output only] The current software version of the + main endpoint. currentNodeCount: [Output only] The number of nodes currently in the cluster. currentNodeVersion: [Output only] The current version of the node software @@ -264,17 +264,17 @@ class Cluster(_messages.Message): description: An optional description of this cluster. enableKubernetesAlpha: Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features - that may not be production ready in the kubernetes version of the master - and nodes. The cluster has no SLA for uptime and master/node upgrades + that may not be production ready in the kubernetes version of the main + and nodes. The cluster has no SLA for uptime and main/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation. - endpoint: [Output only] The IP address of this cluster's master endpoint. + endpoint: [Output only] The IP address of this cluster's main endpoint. The endpoint can be accessed from the internet at - `https://username:password@endpoint/`. See the `masterAuth` property of + `https://username:password@endpoint/`. See the `mainAuth` property of this resource for username and password information. 
expireTime: [Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. - initialClusterVersion: [Output only] The software version of the master + initialClusterVersion: [Output only] The software version of the main endpoint and kubelets used in the cluster when it was first created. The version can be upgraded over time. initialNodeCount: The number of nodes to create in this cluster. You must @@ -295,7 +295,7 @@ class Cluster(_messages.Message): Cloud Logging service. * `none` - no logs will be exported from the cluster. * if left as an empty string,`logging.googleapis.com` will be used. - masterAuth: The authentication information for accessing the master + mainAuth: The authentication information for accessing the main endpoint. monitoringService: The monitoring service the cluster should use to write metrics. Currently available options: * `monitoring.googleapis.com` - @@ -347,7 +347,7 @@ class StatusValueValuesEnum(_messages.Enum): RUNNING: The RUNNING state indicates the cluster has been created and is fully usable. RECONCILING: The RECONCILING state indicates that some work is actively - being done on the cluster, such as upgrading the master or node + being done on the cluster, such as upgrading the main or node software. Details can be found in the `statusMessage` field. STOPPING: The STOPPING state indicates the cluster is being deleted. ERROR: The ERROR state indicates the cluster may be unusable. 
Details @@ -363,7 +363,7 @@ class StatusValueValuesEnum(_messages.Enum): addonsConfig = _messages.MessageField('AddonsConfig', 1) clusterIpv4Cidr = _messages.StringField(2) createTime = _messages.StringField(3) - currentMasterVersion = _messages.StringField(4) + currentMainVersion = _messages.StringField(4) currentNodeCount = _messages.IntegerField(5, variant=_messages.Variant.INT32) currentNodeVersion = _messages.StringField(6) description = _messages.StringField(7) @@ -375,7 +375,7 @@ class StatusValueValuesEnum(_messages.Enum): instanceGroupUrls = _messages.StringField(13, repeated=True) locations = _messages.StringField(14, repeated=True) loggingService = _messages.StringField(15) - masterAuth = _messages.MessageField('MasterAuth', 16) + mainAuth = _messages.MessageField('MainAuth', 16) monitoringService = _messages.StringField(17) name = _messages.StringField(18) network = _messages.StringField(19) @@ -406,10 +406,10 @@ class ClusterUpdate(_messages.Message): nodes being either created or removed from the cluster, depending on whether locations are being added or removed. This list must always include the cluster's primary zone. - desiredMasterMachineType: The name of a Google Compute Engine [machine + desiredMainMachineType: The name of a Google Compute Engine [machine type](/compute/docs/machine-types) (e.g. `n1-standard-8`) to change the - master to. - desiredMasterVersion: The Kubernetes version to change the master to. The + main to. + desiredMainVersion: The Kubernetes version to change the main to. The only valid value is the latest supported version. Use "-" to have the server automatically select the latest version. 
desiredMonitoringService: The monitoring service the cluster should use to @@ -432,114 +432,114 @@ class ClusterUpdate(_messages.Message): desiredAddonsConfig = _messages.MessageField('AddonsConfig', 1) desiredImageType = _messages.StringField(2) desiredLocations = _messages.StringField(3, repeated=True) - desiredMasterMachineType = _messages.StringField(4) - desiredMasterVersion = _messages.StringField(5) + desiredMainMachineType = _messages.StringField(4) + desiredMainVersion = _messages.StringField(5) desiredMonitoringService = _messages.StringField(6) desiredNodePoolAutoscaling = _messages.MessageField('NodePoolAutoscaling', 7) desiredNodePoolId = _messages.StringField(8) desiredNodeVersion = _messages.StringField(9) -class ContainerMasterProjectsZonesAuthenticateRequest(_messages.Message): - """A ContainerMasterProjectsZonesAuthenticateRequest object. +class ContainerMainProjectsZonesAuthenticateRequest(_messages.Message): + """A ContainerMainProjectsZonesAuthenticateRequest object. Fields: authenticateRequest: A AuthenticateRequest resource to be passed as the request body. - clusterId: The name of this master's cluster. - masterProjectId: The hosted master project in which this master resides. + clusterId: The name of this main's cluster. + mainProjectId: The hosted main project in which this main resides. This can be either a [project ID or project number](https://support.google.com/cloud/answer/6158840). projectNumber: The project number for which the signed URLs are being - requested. This is the project in which this master's cluster resides. + requested. This is the project in which this main's cluster resides. Note that this must be a project number, not a project ID. - zone: The zone of this master's cluster. + zone: The zone of this main's cluster. 
""" authenticateRequest = _messages.MessageField('AuthenticateRequest', 1) clusterId = _messages.StringField(2, required=True) - masterProjectId = _messages.StringField(3, required=True) + mainProjectId = _messages.StringField(3, required=True) projectNumber = _messages.IntegerField(4, required=True) zone = _messages.StringField(5, required=True) -class ContainerMasterProjectsZonesAuthorizeRequest(_messages.Message): - """A ContainerMasterProjectsZonesAuthorizeRequest object. +class ContainerMainProjectsZonesAuthorizeRequest(_messages.Message): + """A ContainerMainProjectsZonesAuthorizeRequest object. Fields: authorizeRequest: A AuthorizeRequest resource to be passed as the request body. - clusterId: The name of this master's cluster. - masterProjectId: The hosted master project in which this master resides. + clusterId: The name of this main's cluster. + mainProjectId: The hosted main project in which this main resides. This can be either a [project ID or project number](https://support.google.com/cloud/answer/6158840). projectNumber: The project number for which the request is being - authorized. This is the project in which this master's cluster resides. + authorized. This is the project in which this main's cluster resides. This is an int64, so it must be a project number, not a project ID. - zone: The zone of this master's cluster. + zone: The zone of this main's cluster. """ authorizeRequest = _messages.MessageField('AuthorizeRequest', 1) clusterId = _messages.StringField(2, required=True) - masterProjectId = _messages.StringField(3, required=True) + mainProjectId = _messages.StringField(3, required=True) projectNumber = _messages.IntegerField(4, required=True) zone = _messages.StringField(5, required=True) -class ContainerMasterProjectsZonesImagereviewRequest(_messages.Message): - """A ContainerMasterProjectsZonesImagereviewRequest object. 
+class ContainerMainProjectsZonesImagereviewRequest(_messages.Message): + """A ContainerMainProjectsZonesImagereviewRequest object. Fields: - clusterId: The name of this master's cluster. + clusterId: The name of this main's cluster. imageReviewRequest: A ImageReviewRequest resource to be passed as the request body. - masterProjectId: The hosted master project in which this master resides. + mainProjectId: The hosted main project in which this main resides. This can be either a [project ID or project number](https://support.google.com/cloud/answer/6158840). projectNumber: The project number for which the request is being - authorized. This is the project in which this master's cluster resides. + authorized. This is the project in which this main's cluster resides. This is an int64, so it must be a project number, not a project ID. - zone: The zone of this master's cluster. + zone: The zone of this main's cluster. """ clusterId = _messages.StringField(1, required=True) imageReviewRequest = _messages.MessageField('ImageReviewRequest', 2) - masterProjectId = _messages.StringField(3, required=True) + mainProjectId = _messages.StringField(3, required=True) projectNumber = _messages.IntegerField(4, required=True) zone = _messages.StringField(5, required=True) -class ContainerMasterProjectsZonesSignedUrlsCreateRequest(_messages.Message): - """A ContainerMasterProjectsZonesSignedUrlsCreateRequest object. +class ContainerMainProjectsZonesSignedUrlsCreateRequest(_messages.Message): + """A ContainerMainProjectsZonesSignedUrlsCreateRequest object. Fields: createSignedUrlsRequest: A CreateSignedUrlsRequest resource to be passed as the request body. - masterProjectId: The hosted master project in which this master resides. + mainProjectId: The hosted main project in which this main resides. This can be either a [project ID or project number](https://support.google.com/cloud/answer/6158840). - zone: The zone of this master's cluster. + zone: The zone of this main's cluster. 
""" createSignedUrlsRequest = _messages.MessageField('CreateSignedUrlsRequest', 1) - masterProjectId = _messages.StringField(2, required=True) + mainProjectId = _messages.StringField(2, required=True) zone = _messages.StringField(3, required=True) -class ContainerMasterProjectsZonesTokensCreateRequest(_messages.Message): - """A ContainerMasterProjectsZonesTokensCreateRequest object. +class ContainerMainProjectsZonesTokensCreateRequest(_messages.Message): + """A ContainerMainProjectsZonesTokensCreateRequest object. Fields: createTokenRequest: A CreateTokenRequest resource to be passed as the request body. - masterProjectId: The hosted master project in which this master resides. + mainProjectId: The hosted main project in which this main resides. This can be either a [project ID or project number](https://support.google.com/cloud/answer/6158840). - zone: The zone of this master's cluster. + zone: The zone of this main's cluster. """ createTokenRequest = _messages.MessageField('CreateTokenRequest', 1) - masterProjectId = _messages.StringField(2, required=True) + mainProjectId = _messages.StringField(2, required=True) zone = _messages.StringField(3, required=True) @@ -826,14 +826,14 @@ class CreateNodePoolRequest(_messages.Message): class CreateSignedUrlsRequest(_messages.Message): """A request for signed URLs that allow for writing a file to a private GCS - bucket for storing backups of hosted master data. + bucket for storing backups of hosted main data. Fields: - clusterId: The name of this master's cluster. + clusterId: The name of this main's cluster. filenames: The names of the files for which a signed URLs are being requested. projectNumber: The project number for which the signed URLs are being - requested. This is the project in which this master's cluster resides. + requested. This is the project in which this main's cluster resides. Note that this must be a project number, not a project ID. 
""" @@ -845,13 +845,13 @@ class CreateSignedUrlsRequest(_messages.Message): class CreateTokenRequest(_messages.Message): """A request for a compute-read-write (https://www.googleapis.com/auth/compute) scoped OAuth2 access token for - , to allow hosted masters to make modifications to a user's + , to allow hosted mains to make modifications to a user's project. Fields: - clusterId: The name of this master's cluster. + clusterId: The name of this main's cluster. projectNumber: The project number for which the access is being requested. - This is the project in which this master's cluster resides. Note that + This is the project in which this main's cluster resides. Note that this must be a project number, not a project ID. """ @@ -921,7 +921,7 @@ class ImageReviewRequest(_messages.Message): """A request to verify an image. The request contains the attributes of the container to create. These are passed to BCID for verification. This should look very close to the ImageReview struct in http://github.com/kubernetes/ku - bernetes/blob/master/pkg/apis/imagepolicy/v1beta1/types.go. This message has + bernetes/blob/main/pkg/apis/imagepolicy/v1beta1/types.go. This message has 4 GKE-specific fields that get mapped from the path, but the other fields (the expected JSON payload) must match ImageReview. @@ -947,7 +947,7 @@ class ImageReviewRequest(_messages.Message): class ImageReviewResponse(_messages.Message): """A response to a request for image verification. This should match exactly with the ImageReview struct from http://github.com/kubernetes/kubernetes/blo - b/master/pkg/apis/v1beta1/authorization/types.go. + b/main/pkg/apis/v1beta1/authorization/types.go. Fields: apiVersion: The api version of the ImageReview object. @@ -1068,8 +1068,8 @@ class ListOperationsResponse(_messages.Message): operations = _messages.MessageField('Operation', 2, repeated=True) -class MasterAuth(_messages.Message): - """The authentication information for accessing the master endpoint. 
+class MainAuth(_messages.Message): + """The authentication information for accessing the main endpoint. Authentication can be done using HTTP basic auth or using client certificates. @@ -1080,10 +1080,10 @@ class MasterAuth(_messages.Message): authenticate to the cluster endpoint. clusterCaCertificate: [Output only] Base64-encoded public certificate that is the root of trust for the cluster. - password: The password to use for HTTP basic authentication to the master - endpoint. Because the master endpoint is open to the Internet, you + password: The password to use for HTTP basic authentication to the main + endpoint. Because the main endpoint is open to the Internet, you should create a strong password. - username: The username to use for HTTP basic authentication to the master + username: The username to use for HTTP basic authentication to the main endpoint. """ @@ -1262,7 +1262,7 @@ class NodeManagement(_messages.Message): class NodePool(_messages.Message): """NodePool contains the name and configuration for a cluster's node pool. Node pools are a set of nodes (i.e. VM's), with a common configuration and - specification, under the control of the cluster master. They may have a set + specification, under the control of the cluster main. They may have a set of Kubernetes labels applied to them, which may be used to reference them during pod scheduling. They may also be resized up or down, to accommodate the workload. @@ -1419,7 +1419,7 @@ class OperationTypeValueValuesEnum(_messages.Enum): TYPE_UNSPECIFIED: Not set. CREATE_CLUSTER: Cluster create. DELETE_CLUSTER: Cluster delete. - UPGRADE_MASTER: A master upgrade. + UPGRADE_MASTER: A main upgrade. UPGRADE_NODES: A node upgrade. REPAIR_CLUSTER: Cluster repair. UPDATE_CLUSTER: Cluster update. @@ -1504,7 +1504,7 @@ class ServerConfig(_messages.Message): default. defaultImageType: Default image type. validImageTypes: List of valid image types. - validMasterVersions: List of valid master versions. 
+ validMainVersions: List of valid main versions. validNodeVersions: List of valid node upgrade target versions. """ @@ -1512,7 +1512,7 @@ class ServerConfig(_messages.Message): defaultClusterVersion = _messages.StringField(2) defaultImageType = _messages.StringField(3) validImageTypes = _messages.StringField(4, repeated=True) - validMasterVersions = _messages.StringField(5, repeated=True) + validMainVersions = _messages.StringField(5, repeated=True) validNodeVersions = _messages.StringField(6, repeated=True) @@ -1529,7 +1529,7 @@ class SetNodePoolManagementRequest(_messages.Message): class SignedUrls(_messages.Message): """Signed URLs that allow for writing a file to a private GCS bucket for - storing backups of hosted master data. + storing backups of hosted main data. Fields: signedUrls: The signed URLs for writing the request files, in the same @@ -1679,7 +1679,7 @@ class SubjectAccessReviewStatus(_messages.Message): class Token(_messages.Message): """A compute-read-write (https://www.googleapis.com/auth/compute) scoped - OAuth2 access token, to allow hosted masters to make modifications to a + OAuth2 access token, to allow hosted mains to make modifications to a user's project. Fields: diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/dataproc/v1/dataproc_v1_messages.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/dataproc/v1/dataproc_v1_messages.py index 7a30566..e6db513 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/dataproc/v1/dataproc_v1_messages.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/dataproc/v1/dataproc_v1_messages.py @@ -119,15 +119,15 @@ class ClusterConfig(_messages.Message): gceClusterConfig: Required The shared Google Compute Engine config settings for all instances in a cluster. initializationActions: Optional Commands to execute on each node after - config is completed. By default, executables are run on master and all + config is completed. 
By default, executables are run on main and all worker nodes. You can test a node's role metadata to run an - executable on a master or worker node, as shown below using curl (you + executable on a main or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if - [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else + [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi - masterConfig: Optional The Google Compute Engine config settings for the - master instance in a cluster. + mainConfig: Optional The Google Compute Engine config settings for the + main instance in a cluster. secondaryWorkerConfig: Optional The Google Compute Engine config settings for additional worker instances in a cluster. softwareConfig: Optional The config settings for software inside the @@ -139,7 +139,7 @@ class ClusterConfig(_messages.Message): configBucket = _messages.StringField(1) gceClusterConfig = _messages.MessageField('GceClusterConfig', 2) initializationActions = _messages.MessageField('NodeInitializationAction', 3, repeated=True) - masterConfig = _messages.MessageField('InstanceGroupConfig', 4) + mainConfig = _messages.MessageField('InstanceGroupConfig', 4) secondaryWorkerConfig = _messages.MessageField('InstanceGroupConfig', 5) softwareConfig = _messages.MessageField('SoftwareConfig', 6) workerConfig = _messages.MessageField('InstanceGroupConfig', 7) @@ -969,7 +969,7 @@ class AdditionalProperty(_messages.Message): class InstanceGroupConfig(_messages.Message): """Optional The config settings for Google Compute Engine resources in an - instance group, such as a master or worker group. + instance group, such as a main or worker group. 
Fields: accelerators: Optional The Google Compute Engine accelerator configuration @@ -992,7 +992,7 @@ class InstanceGroupConfig(_messages.Message): Instance Group Manager that manages this group. This is only used for preemptible instance groups. numInstances: Required The number of VM instances in the instance group. - For master instance groups, must be set to 1. + For main instance groups, must be set to 1. """ accelerators = _messages.MessageField('AcceleratorConfig', 1, repeated=True) @@ -2081,7 +2081,7 @@ class YarnApplication(_messages.Message): progress: Required The numerical progress of the application, from 1 to 100. state: Required The application state. - trackingUrl: Optional The HTTP URL of the ApplicationMaster, + trackingUrl: Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access. diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/genomics/v1/genomics_v1_messages.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/genomics/v1/genomics_v1_messages.py index d99a751..3f2a57f 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/genomics/v1/genomics_v1_messages.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/genomics/v1/genomics_v1_messages.py @@ -1216,8 +1216,8 @@ class FormatValueValuesEnum(_messages.Enum): FORMAT_UNSPECIFIED: FORMAT_VCF: VCF (Variant Call Format). The VCF files may be gzip compressed. gVCF is also supported. - FORMAT_COMPLETE_GENOMICS: Complete Genomics masterVarBeta format. The - masterVarBeta files may be bzip2 compressed. + FORMAT_COMPLETE_GENOMICS: Complete Genomics masterVarBeta format. The + masterVarBeta files may be bzip2 compressed. 
""" FORMAT_UNSPECIFIED = 0 FORMAT_VCF = 1 diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1/ml_v1_messages.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1/ml_v1_messages.py index c07b48f..8d52823 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1/ml_v1_messages.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1/ml_v1_messages.py @@ -566,14 +566,14 @@ class GoogleCloudMlV1TrainingInput(_messages.Message): TensorFlow program as the 'job_dir' command-line argument. The benefit of specifying this field is that Cloud ML validates the path for use in training. - masterType: Optional. Specifies the type of virtual machine to use for - your training job's master worker. The following types are supported: + mainType: Optional. Specifies the type of virtual machine to use for + your training job's main worker. The following types are supported:
standard
A basic machine configuration suitable for training simple models with small to moderate datasets.
large_model
A machine with a lot of memory, specially suited for parameter servers when your model is large (having many hidden layers or layers with very large numbers of nodes).
-
complex_model_s
A machine suitable for the master and +
complex_model_s
A machine suitable for the main and workers of the cluster when your model requires more computation than the standard machine can handle satisfactorily.
complex_model_m
A machine with roughly twice the @@ -598,7 +598,7 @@ class GoogleCloudMlV1TrainingInput(_messages.Message): also set `parameter_server_type`. parameterServerType: Optional. Specifies the type of virtual machine to use for your training job's parameter server. The supported values are - the same as those described in the entry for `master_type`. This value + the same as those described in the entry for `main_type`. This value must be present when `scaleTier` is set to `CUSTOM` and `parameter_server_count` is greater than zero. pythonModule: Required. The Python module name to run after installing the @@ -616,7 +616,7 @@ class GoogleCloudMlV1TrainingInput(_messages.Message): to `CUSTOM`. If you set this value, you must also set `worker_type`. workerType: Optional. Specifies the type of virtual machine to use for your training job's worker nodes. The supported values are the same as - those described in the entry for `masterType`. This value must be + those described in the entry for `mainType`. This value must be present when `scaleTier` is set to `CUSTOM` and `workerCount` is greater than zero. """ @@ -636,8 +636,8 @@ class ScaleTierValueValuesEnum(_messages.Enum): CUSTOM: The CUSTOM tier is not a set tier, but rather enables you to use your own cluster specification. When you use this tier, set values to configure your processing cluster according to these guidelines: * - You _must_ set `TrainingInput.masterType` to specify the type of - machine to use for your master node. This is the only required + You _must_ set `TrainingInput.mainType` to specify the type of + machine to use for your main node. This is the only required setting. * You _may_ set `TrainingInput.workerCount` to specify the number of workers to use. 
If you specify one or more workers, you _must_ also set `TrainingInput.workerType` to specify the type of @@ -647,9 +647,9 @@ class ScaleTierValueValuesEnum(_messages.Enum): servers, you _must_ also set `TrainingInput.parameterServerType` to specify the type of machine to use for your parameter servers. Note that all of your workers must use the same machine type, which - can be different from your parameter server type and master type. Your + can be different from your parameter server type and main type. Your parameter servers must likewise use the same machine type, which can - be different from your worker type and master type. + be different from your worker type and main type. """ BASIC = 0 STANDARD_1 = 1 @@ -660,7 +660,7 @@ class ScaleTierValueValuesEnum(_messages.Enum): args = _messages.StringField(1, repeated=True) hyperparameters = _messages.MessageField('GoogleCloudMlV1HyperparameterSpec', 2) jobDir = _messages.StringField(3) - masterType = _messages.StringField(4) + mainType = _messages.StringField(4) packageUris = _messages.StringField(5, repeated=True) parameterServerCount = _messages.IntegerField(6) parameterServerType = _messages.StringField(7) diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1beta1/ml_v1beta1_messages.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1beta1/ml_v1beta1_messages.py index 2acdd0c..61208a3 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1beta1/ml_v1beta1_messages.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/ml/v1beta1/ml_v1beta1_messages.py @@ -646,14 +646,14 @@ class GoogleCloudMlV1beta1TrainingInput(_messages.Message): TensorFlow program as the 'job_dir' command-line argument. The benefit of specifying this field is that Cloud ML validates the path for use in training. - masterType: Optional. Specifies the type of virtual machine to use for - your training job's master worker. The following types are supported: + mainType: Optional. 
Specifies the type of virtual machine to use for + your training job's main worker. The following types are supported:
standard
A basic machine configuration suitable for training simple models with small to moderate datasets.
large_model
A machine with a lot of memory, specially suited for parameter servers when your model is large (having many hidden layers or layers with very large numbers of nodes).
-
complex_model_s
A machine suitable for the master and +
complex_model_s
A machine suitable for the main and workers of the cluster when your model requires more computation than the standard machine can handle satisfactorily.
complex_model_m
A machine with roughly twice the @@ -678,7 +678,7 @@ class GoogleCloudMlV1beta1TrainingInput(_messages.Message): also set `parameter_server_type`. parameterServerType: Optional. Specifies the type of virtual machine to use for your training job's parameter server. The supported values are - the same as those described in the entry for `master_type`. This value + the same as those described in the entry for `main_type`. This value must be present when `scaleTier` is set to `CUSTOM` and `parameter_server_count` is greater than zero. pythonModule: Required. The Python module name to run after installing the @@ -696,7 +696,7 @@ class GoogleCloudMlV1beta1TrainingInput(_messages.Message): to `CUSTOM`. If you set this value, you must also set `worker_type`. workerType: Optional. Specifies the type of virtual machine to use for your training job's worker nodes. The supported values are the same as - those described in the entry for `masterType`. This value must be + those described in the entry for `mainType`. This value must be present when `scaleTier` is set to `CUSTOM` and `workerCount` is greater than zero. """ @@ -716,8 +716,8 @@ class ScaleTierValueValuesEnum(_messages.Enum): CUSTOM: The CUSTOM tier is not a set tier, but rather enables you to use your own cluster specification. When you use this tier, set values to configure your processing cluster according to these guidelines: * - You _must_ set `TrainingInput.masterType` to specify the type of - machine to use for your master node. This is the only required + You _must_ set `TrainingInput.mainType` to specify the type of + machine to use for your main node. This is the only required setting. * You _may_ set `TrainingInput.workerCount` to specify the number of workers to use. 
If you specify one or more workers, you _must_ also set `TrainingInput.workerType` to specify the type of @@ -727,9 +727,9 @@ class ScaleTierValueValuesEnum(_messages.Enum): servers, you _must_ also set `TrainingInput.parameterServerType` to specify the type of machine to use for your parameter servers. Note that all of your workers must use the same machine type, which - can be different from your parameter server type and master type. Your + can be different from your parameter server type and main type. Your parameter servers must likewise use the same machine type, which can - be different from your worker type and master type. + be different from your worker type and main type. """ BASIC = 0 STANDARD_1 = 1 @@ -740,7 +740,7 @@ class ScaleTierValueValuesEnum(_messages.Enum): args = _messages.StringField(1, repeated=True) hyperparameters = _messages.MessageField('GoogleCloudMlV1beta1HyperparameterSpec', 2) jobDir = _messages.StringField(3) - masterType = _messages.StringField(4) + mainType = _messages.StringField(4) packageUris = _messages.StringField(5, repeated=True) parameterServerCount = _messages.IntegerField(6) parameterServerType = _messages.StringField(7) diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta3/sqladmin_v1beta3_messages.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta3/sqladmin_v1beta3_messages.py index 89ee704..a7fca08 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta3/sqladmin_v1beta3_messages.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta3/sqladmin_v1beta3_messages.py @@ -153,7 +153,7 @@ class DatabaseInstance(_messages.Message): ipAddresses: The assigned IP addresses for the instance. ipv6Address: The IPv6 address assigned to the instance. kind: This is always sql#instance. 
- masterInstanceName: The name of the instance which will act as master in + mainInstanceName: The name of the instance which will act as main in the replication setup. maxDiskSize: The maximum disk size of the instance in bytes. project: The project ID of the project containing the Cloud SQL instance. @@ -183,7 +183,7 @@ class DatabaseInstance(_messages.Message): ipAddresses = _messages.MessageField('IpMapping', 7, repeated=True) ipv6Address = _messages.StringField(8) kind = _messages.StringField(9, default=u'sql#instance') - masterInstanceName = _messages.StringField(10) + mainInstanceName = _messages.StringField(10) maxDiskSize = _messages.IntegerField(11) project = _messages.StringField(12) region = _messages.StringField(13) diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta4/sqladmin_v1beta4_messages.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta4/sqladmin_v1beta4_messages.py index 82d7143..8da2eeb 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta4/sqladmin_v1beta4_messages.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/sqladmin/v1beta4/sqladmin_v1beta4_messages.py @@ -210,14 +210,14 @@ class DatabaseInstance(_messages.Message): property is applicable only to Second Generation instances. instanceType: The instance type. This can be one of the following. CLOUD_SQL_INSTANCE: A Cloud SQL instance that is not replicating from a - master. ON_PREMISES_INSTANCE: An instance running on the customer's + main. ON_PREMISES_INSTANCE: An instance running on the customer's premises. READ_REPLICA_INSTANCE: A Cloud SQL instance configured as a read-replica. ipAddresses: The assigned IP addresses for the instance. ipv6Address: The IPv6 address assigned to the instance. This property is applicable only to First Generation instances. kind: This is always sql#instance. 
- masterInstanceName: The name of the instance which will act as master in + mainInstanceName: The name of the instance which will act as main in the replication setup. maxDiskSize: The maximum disk size of the instance in bytes. name: Name of the Cloud SQL instance. This does not include the project @@ -231,7 +231,7 @@ class DatabaseInstance(_messages.Message): type (First Generation or Second Generation). The region can not be changed after instance creation. replicaConfiguration: Configuration specific to read-replicas replicating - from on-premises masters. + from on-premises mains. replicaNames: The replicas of the instance. selfLink: The URI of this resource. serverCaCert: SSL configuration. @@ -256,7 +256,7 @@ class FailoverReplicaValue(_messages.Message): Fields: available: The availability status of the failover replica. A false - status indicates that the failover replica is out of sync. The master + status indicates that the failover replica is out of sync. The main can only failover to the falover replica when the status is true. name: The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name @@ -277,7 +277,7 @@ class FailoverReplicaValue(_messages.Message): ipAddresses = _messages.MessageField('IpMapping', 8, repeated=True) ipv6Address = _messages.StringField(9) kind = _messages.StringField(10, default=u'sql#instance') - masterInstanceName = _messages.StringField(11) + mainInstanceName = _messages.StringField(11) maxDiskSize = _messages.IntegerField(12) name = _messages.StringField(13) onPremisesConfiguration = _messages.MessageField('OnPremisesConfiguration', 14) @@ -620,24 +620,24 @@ class MySqlReplicaConfiguration(_messages.Message): Fields: caCertificate: PEM representation of the trusted CA's x509 certificate. - clientCertificate: PEM representation of the slave's x509 certificate. - clientKey: PEM representation of the slave's private key. 
The + clientCertificate: PEM representation of the subordinate's x509 certificate. + clientKey: PEM representation of the subordinate's private key. The corresponsing public key is encoded in the client's certificate. connectRetryInterval: Seconds to wait between connect retries. MySQL's default is 60 seconds. dumpFilePath: Path to a SQL dump file in Google Cloud Storage from which - the slave instance is to be created. The URI is in the form + the subordinate instance is to be created. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are also supported. Dumps should have the binlog co-ordinates from which - replication should begin. This can be accomplished by setting --master- + replication should begin. This can be accomplished by setting --main- data to 1 when using mysqldump. kind: This is always sql#mysqlReplicaConfiguration. - masterHeartbeatPeriod: Interval in milliseconds between replication + mainHeartbeatPeriod: Interval in milliseconds between replication heartbeats. password: The password for the replication connection. sslCipher: A list of permissible ciphers to use for SSL encryption. username: The username for the replication connection. - verifyServerCertificate: Whether or not to check the master's Common Name + verifyServerCertificate: Whether or not to check the main's Common Name value in the certificate that it sends during the SSL handshake. 
""" @@ -647,7 +647,7 @@ class MySqlReplicaConfiguration(_messages.Message): connectRetryInterval = _messages.IntegerField(4, variant=_messages.Variant.INT32) dumpFilePath = _messages.StringField(5) kind = _messages.StringField(6, default=u'sql#mysqlReplicaConfiguration') - masterHeartbeatPeriod = _messages.IntegerField(7) + mainHeartbeatPeriod = _messages.IntegerField(7) password = _messages.StringField(8) sslCipher = _messages.StringField(9) username = _messages.StringField(10) @@ -761,22 +761,22 @@ class OperationsListResponse(_messages.Message): class ReplicaConfiguration(_messages.Message): - """Read-replica configuration for connecting to the master. + """Read-replica configuration for connecting to the main. Fields: failoverTarget: Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover - replica. In case the master instance fails, the replica instance will be - promoted as the new master instance. Only one replica can be specified + replica. In case the main instance fails, the replica instance will be + promoted as the new main instance. Only one replica can be specified as failover target, and the replica has to be in different zone with the - master instance. + main instance. kind: This is always sql#replicaConfiguration. mysqlReplicaConfiguration: MySQL specific configuration when replicating - from a MySQL on-premises master. Replication configuration information + from a MySQL on-premises main. Replication configuration information such as the username, password, certificates, and keys are not stored in the instance metadata. The configuration information is used only to set up the replication connection and is stored by MySQL in a file named - master.info in the data directory. + main.info in the data directory. 
""" failoverTarget = _messages.BooleanField(1) diff --git a/google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/proto/protocoltype_pb.py b/google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/proto/protocoltype_pb.py index 20551f2..fadad63 100755 --- a/google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/proto/protocoltype_pb.py +++ b/google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/proto/protocoltype_pb.py @@ -1053,8 +1053,8 @@ def DeclaredType_Name(cls, x): return cls._DeclaredType_NAMES.get(x, "") proto_name_ = "" has_proto2_file_descriptor_ = 0 proto2_file_descriptor_ = "" - has_proto2_file_master_ = 0 - proto2_file_master_ = "" + has_proto2_file_main_ = 0 + proto2_file_main_ = "" has_proto2_name_ = 0 proto2_name_ = "" has_proto2_extension_info_ = 0 @@ -1151,18 +1151,18 @@ def clear_proto2_file_descriptor(self): def has_proto2_file_descriptor(self): return self.has_proto2_file_descriptor_ - def proto2_file_master(self): return self.proto2_file_master_ + def proto2_file_main(self): return self.proto2_file_main_ - def set_proto2_file_master(self, x): - self.has_proto2_file_master_ = 1 - self.proto2_file_master_ = x + def set_proto2_file_main(self, x): + self.has_proto2_file_main_ = 1 + self.proto2_file_main_ = x - def clear_proto2_file_master(self): - if self.has_proto2_file_master_: - self.has_proto2_file_master_ = 0 - self.proto2_file_master_ = "" + def clear_proto2_file_main(self): + if self.has_proto2_file_main_: + self.has_proto2_file_main_ = 0 + self.proto2_file_main_ = "" - def has_proto2_file_master(self): return self.has_proto2_file_master_ + def has_proto2_file_main(self): return self.has_proto2_file_main_ def proto2_name(self): return self.proto2_name_ @@ -1212,7 +1212,7 @@ def MergeFrom(self, x): for i in xrange(x.tag_size()): self.add_tag().CopyFrom(x.tag(i)) for i in xrange(x.enumtype_size()): self.add_enumtype().CopyFrom(x.enumtype(i)) if (x.has_proto2_file_descriptor()): 
self.set_proto2_file_descriptor(x.proto2_file_descriptor()) - if (x.has_proto2_file_master()): self.set_proto2_file_master(x.proto2_file_master()) + if (x.has_proto2_file_main()): self.set_proto2_file_main(x.proto2_file_main()) if (x.has_proto2_name()): self.set_proto2_name(x.proto2_name()) if (x.has_proto2_extension_info()): self.set_proto2_extension_info(x.proto2_extension_info()) if (x.has_proto2_file_scope_info()): self.set_proto2_file_scope_info(x.proto2_file_scope_info()) @@ -1260,8 +1260,8 @@ def Equals(self, x): if e1 != e2: return 0 if self.has_proto2_file_descriptor_ != x.has_proto2_file_descriptor_: return 0 if self.has_proto2_file_descriptor_ and self.proto2_file_descriptor_ != x.proto2_file_descriptor_: return 0 - if self.has_proto2_file_master_ != x.has_proto2_file_master_: return 0 - if self.has_proto2_file_master_ and self.proto2_file_master_ != x.proto2_file_master_: return 0 + if self.has_proto2_file_main_ != x.has_proto2_file_main_: return 0 + if self.has_proto2_file_main_ and self.proto2_file_main_ != x.proto2_file_main_: return 0 if self.has_proto2_name_ != x.has_proto2_name_: return 0 if self.has_proto2_name_ and self.proto2_name_ != x.proto2_name_: return 0 if self.has_proto2_extension_info_ != x.has_proto2_extension_info_: return 0 @@ -1296,7 +1296,7 @@ def ByteSize(self): n += 2 * len(self.enumtype_) for i in xrange(len(self.enumtype_)): n += self.enumtype_[i].ByteSize() if (self.has_proto2_file_descriptor_): n += 2 + self.lengthString(len(self.proto2_file_descriptor_)) - if (self.has_proto2_file_master_): n += 2 + self.lengthString(len(self.proto2_file_master_)) + if (self.has_proto2_file_main_): n += 2 + self.lengthString(len(self.proto2_file_main_)) if (self.has_proto2_name_): n += 2 + self.lengthString(len(self.proto2_name_)) if (self.has_proto2_extension_info_): n += 2 + self.lengthString(len(self.proto2_extension_info_)) if (self.has_proto2_file_scope_info_): n += 2 + self.lengthString(len(self.proto2_file_scope_info_)) @@ -1316,7 
+1316,7 @@ def ByteSizePartial(self): n += 2 * len(self.enumtype_) for i in xrange(len(self.enumtype_)): n += self.enumtype_[i].ByteSizePartial() if (self.has_proto2_file_descriptor_): n += 2 + self.lengthString(len(self.proto2_file_descriptor_)) - if (self.has_proto2_file_master_): n += 2 + self.lengthString(len(self.proto2_file_master_)) + if (self.has_proto2_file_main_): n += 2 + self.lengthString(len(self.proto2_file_main_)) if (self.has_proto2_name_): n += 2 + self.lengthString(len(self.proto2_name_)) if (self.has_proto2_extension_info_): n += 2 + self.lengthString(len(self.proto2_extension_info_)) if (self.has_proto2_file_scope_info_): n += 2 + self.lengthString(len(self.proto2_file_scope_info_)) @@ -1329,7 +1329,7 @@ def Clear(self): self.clear_tag() self.clear_enumtype() self.clear_proto2_file_descriptor() - self.clear_proto2_file_master() + self.clear_proto2_file_main() self.clear_proto2_name() self.clear_proto2_extension_info() self.clear_proto2_file_scope_info() @@ -1350,9 +1350,9 @@ def OutputUnchecked(self, out): if (self.has_proto2_file_descriptor_): out.putVarInt32(186) out.putPrefixedString(self.proto2_file_descriptor_) - if (self.has_proto2_file_master_): + if (self.has_proto2_file_main_): out.putVarInt32(194) - out.putPrefixedString(self.proto2_file_master_) + out.putPrefixedString(self.proto2_file_main_) if (self.has_proto2_name_): out.putVarInt32(202) out.putPrefixedString(self.proto2_name_) @@ -1384,9 +1384,9 @@ def OutputPartial(self, out): if (self.has_proto2_file_descriptor_): out.putVarInt32(186) out.putPrefixedString(self.proto2_file_descriptor_) - if (self.has_proto2_file_master_): + if (self.has_proto2_file_main_): out.putVarInt32(194) - out.putPrefixedString(self.proto2_file_master_) + out.putPrefixedString(self.proto2_file_main_) if (self.has_proto2_name_): out.putVarInt32(202) out.putPrefixedString(self.proto2_name_) @@ -1419,7 +1419,7 @@ def TryMerge(self, d): self.set_proto2_file_descriptor(d.getPrefixedString()) continue if tt == 
194: - self.set_proto2_file_master(d.getPrefixedString()) + self.set_proto2_file_main(d.getPrefixedString()) continue if tt == 202: self.set_proto2_name(d.getPrefixedString()) @@ -1461,7 +1461,7 @@ def __str__(self, prefix="", printElemNumber=0): res+=prefix+"}\n" cnt+=1 if self.has_proto2_file_descriptor_: res+=prefix+("proto2_file_descriptor: %s\n" % self.DebugFormatString(self.proto2_file_descriptor_)) - if self.has_proto2_file_master_: res+=prefix+("proto2_file_master: %s\n" % self.DebugFormatString(self.proto2_file_master_)) + if self.has_proto2_file_main_: res+=prefix+("proto2_file_main: %s\n" % self.DebugFormatString(self.proto2_file_main_)) if self.has_proto2_name_: res+=prefix+("proto2_name: %s\n" % self.DebugFormatString(self.proto2_name_)) if self.has_proto2_extension_info_: res+=prefix+("proto2_extension_info: %s\n" % self.DebugFormatString(self.proto2_extension_info_)) if self.has_proto2_file_scope_info_: res+=prefix+("proto2_file_scope_info: %s\n" % self.DebugFormatString(self.proto2_file_scope_info_)) @@ -1499,7 +1499,7 @@ def _BuildTagLookupTable(sparse, maxtag, default=None): kEnumTypeproto2_name = 31 kEnumTypeallow_alias = 33 kproto2_file_descriptor = 23 - kproto2_file_master = 24 + kproto2_file_main = 24 kproto2_name = 25 kproto2_extension_info = 29 kproto2_file_scope_info = 30 @@ -1529,7 +1529,7 @@ def _BuildTagLookupTable(sparse, maxtag, default=None): 21: "name", 22: "value", 23: "proto2_file_descriptor", - 24: "proto2_file_master", + 24: "proto2_file_main", 25: "proto2_name", 26: "deprecated", 27: "proto_name", diff --git a/google-cloud-sdk/lib/surface/container/clusters/get_credentials.py b/google-cloud-sdk/lib/surface/container/clusters/get_credentials.py index 44dd0de..b8e2f3c 100755 --- a/google-cloud-sdk/lib/surface/container/clusters/get_credentials.py +++ b/google-cloud-sdk/lib/surface/container/clusters/get_credentials.py @@ -67,7 +67,7 @@ def Run(self, args): log.status.Print('Fetching cluster endpoint and auth data.') # Call 
DescribeCluster to get auth info and cache for next time cluster = adapter.GetCluster(cluster_ref) - auth = cluster.masterAuth + auth = cluster.mainAuth has_creds = (auth and ((auth.clientCertificate and auth.clientKey) or (auth.username and auth.password))) if not has_creds and not util.ClusterConfig.UseGCPAuthProvider(cluster): diff --git a/google-cloud-sdk/lib/surface/container/clusters/upgrade.py b/google-cloud-sdk/lib/surface/container/clusters/upgrade.py index a4511ef..32cca8e 100755 --- a/google-cloud-sdk/lib/surface/container/clusters/upgrade.py +++ b/google-cloud-sdk/lib/surface/container/clusters/upgrade.py @@ -47,20 +47,20 @@ class UpgradeHelpText(object): class VersionVerifier(object): - """Compares the cluster and master versions for upgrade availablity.""" + """Compares the cluster and main versions for upgrade availablity.""" UP_TO_DATE = 0 UPGRADE_AVAILABLE = 1 SUPPORT_ENDING = 2 UNSUPPORTED = 3 - def Compare(self, current_master_version, current_cluster_version): - """Compares the cluster and master versions and returns an enum.""" - # TODO(user):update the if condition when we roll the master version - if current_master_version == current_cluster_version: + def Compare(self, current_main_version, current_cluster_version): + """Compares the cluster and main versions and returns an enum.""" + # TODO(user):update the if condition when we roll the main version + if current_main_version == current_cluster_version: return self.UP_TO_DATE - master_version = SemVer(current_master_version) + main_version = SemVer(current_main_version) cluster_version = SemVer(current_cluster_version) - major, minor, _ = master_version.Distance(cluster_version) + major, minor, _ = main_version.Distance(cluster_version) if major != 0 or minor > 2: return self.UNSUPPORTED elif minor > 1: @@ -86,7 +86,7 @@ def _Args(parser): The Kubernetes release version to which to upgrade the cluster's nodes. 
If provided, the --cluster-version must be no greater than the cluster -master's minor version (x.*X*.x), and must be a latest patch version +main's minor version (x.*X*.x), and must be a latest patch version (x.x.*X*). You can find the list of allowed versions for upgrades by running: @@ -97,10 +97,10 @@ def _Args(parser): '--node-pool', help='The node pool to upgrade.') parser.add_argument( - '--master', - help='Upgrade the cluster\'s master to the latest version of Kubernetes' + '--main', + help='Upgrade the cluster\'s main to the latest version of Kubernetes' ' supported on Container Engine. Nodes cannot be upgraded at the same' - ' time as the master.', + ' time as the main.', action='store_true') flags.AddClustersWaitAndAsyncFlags(parser) flags.AddImageTypeFlag(parser, 'cluster/node pool') @@ -133,8 +133,8 @@ def Run(self, args): options = api_adapter.UpdateClusterOptions( version=args.cluster_version, - update_master=args.master, - update_nodes=(not args.master), + update_main=args.main, + update_nodes=(not args.main), node_pool=args.node_pool, image_type=args.image_type) @@ -142,11 +142,11 @@ def Run(self, args): new_version_message = 'version [{new_version}]'.format( new_version=options.version) else: - new_version_message = 'master version' + new_version_message = 'main version' - if args.master: - node_message = 'Master' - current_version = cluster.currentMasterVersion + if args.main: + node_message = 'Main' + current_version = cluster.currentMainVersion else: node_message = 'All {node_count} {node}'.format( node_count=cluster.currentNodeCount, @@ -182,7 +182,7 @@ def Run(self, args): Upgrades the Kubernetes version of an existing container cluster. This command upgrades the Kubernetes version of the *nodes* of a cluster. - The Kubernetes version of the cluster's *master* is periodically upgraded + The Kubernetes version of the cluster's *main* is periodically upgraded automatically as new releases are available. 
*By running this command, all of the cluster's nodes will be deleted and* @@ -198,7 +198,7 @@ def Run(self, args): """, 'EXAMPLES': """\ Upgrade the nodes of to the Kubernetes version of the cluster's - master. + main. $ {command} diff --git a/google-cloud-sdk/lib/surface/dataproc/clusters/create.py b/google-cloud-sdk/lib/surface/dataproc/clusters/create.py index 00de175..9e4cae6 100755 --- a/google-cloud-sdk/lib/surface/dataproc/clusters/create.py +++ b/google-cloud-sdk/lib/surface/dataproc/clusters/create.py @@ -54,8 +54,8 @@ def _CommonArgs(parser): type=int, help='The number of preemptible worker nodes in the cluster.') parser.add_argument( - '--master-machine-type', - help='The type of machine to use for the master. Defaults to ' + '--main-machine-type', + help='The type of machine to use for the main. Defaults to ' 'server-specified.') parser.add_argument( '--worker-machine-type', @@ -96,9 +96,9 @@ def _CommonArgs(parser): type=int, help='The number of local SSDs to attach to each worker in a cluster.') parser.add_argument( - '--num-master-local-ssds', + '--num-main-local-ssds', type=int, - help='The number of local SSDs to attach to the master in a cluster.') + help='The number of local SSDs to attach to the main in a cluster.') parser.add_argument( '--initialization-actions', type=arg_parsers.ArgList(min_length=1), @@ -183,12 +183,12 @@ def _CommonArgs(parser): additional_scopes='\n'.join(constants.ADDITIONAL_DEFAULT_SCOPE_URIS), aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP)) - master_boot_disk = parser.add_mutually_exclusive_group() + main_boot_disk = parser.add_mutually_exclusive_group() worker_boot_disk = parser.add_mutually_exclusive_group() # Deprecated, to be removed at a future date. - master_boot_disk.add_argument( - '--master-boot-disk-size-gb', + main_boot_disk.add_argument( + '--main-boot-disk-size-gb', type=int, hidden=True) worker_boot_disk.add_argument( @@ -203,8 +203,8 @@ def _CommonArgs(parser): ``10GB'' will produce a 10 gigabyte disk. 
The minimum size a boot disk can have is 10 GB. Disk size must be a multiple of 1 GB. """ - master_boot_disk.add_argument( - '--master-boot-disk-size', + main_boot_disk.add_argument( + '--main-boot-disk-size', type=arg_parsers.BinarySize(lower_bound='10GB'), help=boot_disk_size_detailed_help) worker_boot_disk.add_argument( @@ -240,15 +240,15 @@ class Create(base.CreateCommand): @staticmethod def Args(parser): _CommonArgs(parser) - parser.add_argument('--num-masters', type=int, hidden=True) + parser.add_argument('--num-mains', type=int, hidden=True) parser.add_argument('--single-node', action='store_true', hidden=True) @staticmethod def ValidateArgs(args): - if args.master_boot_disk_size_gb: - log.warn('The --master-boot-disk-size-gb flag is deprecated. ' - 'Use equivalent --master-boot-disk-size=%sGB flag.', - args.master_boot_disk_size_gb) + if args.main_boot_disk_size_gb: + log.warn('The --main-boot-disk-size-gb flag is deprecated. ' + 'Use equivalent --main-boot-disk-size=%sGB flag.', + args.main_boot_disk_size_gb) if args.worker_boot_disk_size_gb: log.warn('The --worker-boot-disk-size-gb flag is deprecated. 
' @@ -281,14 +281,14 @@ def Run(self, args): compute_resources = compute_helpers.GetComputeResources( self.ReleaseTrack(), args.name) - master_accelerator_type = None + main_accelerator_type = None worker_accelerator_type = None - master_accelerator_count = None + main_accelerator_count = None worker_accelerator_count = None if self.ReleaseTrack() == base.ReleaseTrack.BETA: - if args.master_accelerator: - master_accelerator_type = args.master_accelerator['type'] - master_accelerator_count = args.master_accelerator.get('count', 1) + if args.main_accelerator: + main_accelerator_type = args.main_accelerator['type'] + main_accelerator_count = args.main_accelerator.get('count', 1) if args.worker_accelerator: worker_accelerator_type = args.worker_accelerator['type'] worker_accelerator_count = args.worker_accelerator.get('count', 1) @@ -297,9 +297,9 @@ def Run(self, args): zone_ref = compute_resources.Parse(None, collection='compute.zones') image_ref = args.image and compute_resources.Parse( args.image, collection='compute.images') - master_machine_type_ref = ( - args.master_machine_type and compute_resources.Parse( - args.master_machine_type, collection='compute.machineTypes')) + main_machine_type_ref = ( + args.main_machine_type and compute_resources.Parse( + args.main_machine_type, collection='compute.machineTypes')) worker_machine_type_ref = ( args.worker_machine_type and compute_resources.Parse( args.worker_machine_type, collection='compute.machineTypes')) @@ -307,9 +307,9 @@ def Run(self, args): args.network, collection='compute.networks') subnetwork_ref = args.subnet and compute_resources.Parse( args.subnet, collection='compute.subnetworks') - master_accelerator_type_ref = ( - master_accelerator_type and compute_resources.Parse( - master_accelerator_type, collection='compute.acceleratorTypes')) + main_accelerator_type_ref = ( + main_accelerator_type and compute_resources.Parse( + main_accelerator_type, collection='compute.acceleratorTypes')) 
worker_accelerator_type_ref = ( worker_accelerator_type and compute_resources.Parse( worker_accelerator_type, collection='compute.acceleratorTypes')) @@ -325,10 +325,10 @@ def Run(self, args): software_config = messages.SoftwareConfig( imageVersion=args.image_version) - master_boot_disk_size_gb = args.master_boot_disk_size_gb - if args.master_boot_disk_size: - master_boot_disk_size_gb = ( - api_utils.BytesToGb(args.master_boot_disk_size)) + main_boot_disk_size_gb = args.main_boot_disk_size_gb + if args.main_boot_disk_size: + main_boot_disk_size_gb = ( + api_utils.BytesToGb(args.main_boot_disk_size)) worker_boot_disk_size_gb = args.worker_boot_disk_size_gb if args.worker_boot_disk_size: @@ -360,13 +360,13 @@ def Run(self, args): gce_cluster_config.metadata = encoding.DictToMessage( flat_metadata, messages.GceClusterConfig.MetadataValue) - master_accelerators = [] - if master_accelerator_type: - master_accelerators.append( + main_accelerators = [] + if main_accelerator_type: + main_accelerators.append( messages.AcceleratorConfig( - acceleratorTypeUri=master_accelerator_type_ref and - master_accelerator_type_ref.SelfLink(), - acceleratorCount=master_accelerator_count)) + acceleratorTypeUri=main_accelerator_type_ref and + main_accelerator_type_ref.SelfLink(), + acceleratorCount=main_accelerator_count)) worker_accelerators = [] if worker_accelerator_type: worker_accelerators.append(messages.AcceleratorConfig( @@ -377,15 +377,15 @@ def Run(self, args): cluster_config = messages.ClusterConfig( configBucket=args.bucket, gceClusterConfig=gce_cluster_config, - masterConfig=messages.InstanceGroupConfig( - numInstances=args.num_masters, + mainConfig=messages.InstanceGroupConfig( + numInstances=args.num_mains, imageUri=image_ref and image_ref.SelfLink(), - machineTypeUri=master_machine_type_ref and - master_machine_type_ref.SelfLink(), - accelerators=master_accelerators, + machineTypeUri=main_machine_type_ref and + main_machine_type_ref.SelfLink(), + 
accelerators=main_accelerators, diskConfig=messages.DiskConfig( - bootDiskSizeGb=master_boot_disk_size_gb, - numLocalSsds=args.num_master_local_ssds, + bootDiskSizeGb=main_boot_disk_size_gb, + numLocalSsds=args.num_main_local_ssds, ), ), workerConfig=messages.InstanceGroupConfig( @@ -470,14 +470,14 @@ class CreateBeta(Create): def Args(parser): _CommonArgs(parser) parser.add_argument( - '--num-masters', + '--num-mains', type=int, help="""\ - The number of master nodes in the cluster. + The number of main nodes in the cluster. [format="csv",options="header"] |======== - Number of Masters,Cluster Mode + Number of Mains,Cluster Mode 1,Standard 3,High Availability |======== @@ -489,11 +489,11 @@ def Args(parser): help="""\ Create a single node cluster. - A single node cluster has all master and worker components. + A single node cluster has all main and worker components. It cannot have any separate worker nodes. """) - for instance_type in ('master', 'worker'): + for instance_type in ('main', 'worker'): help_msg = """\ Attaches accelerators (e.g. GPUs) to the {instance_type} instance(s). @@ -524,10 +524,10 @@ def Args(parser): @staticmethod def ValidateArgs(args): - if args.master_accelerator and 'type' not in args.master_accelerator: + if args.main_accelerator and 'type' not in args.main_accelerator: raise exceptions.InvalidArgumentException( - '--master-accelerator', 'accelerator type must be specified. ' - 'e.g. --master-accelerator type=nvidia-tesla-k80,count=2') + '--main-accelerator', 'accelerator type must be specified. ' + 'e.g. --main-accelerator type=nvidia-tesla-k80,count=2') if args.worker_accelerator and 'type' not in args.worker_accelerator: raise exceptions.InvalidArgumentException( '--worker-accelerator', 'accelerator type must be specified. 
' diff --git a/google-cloud-sdk/lib/surface/functions/deploy.py b/google-cloud-sdk/lib/surface/functions/deploy.py index 771c0f9..d05ca97 100755 --- a/google-cloud-sdk/lib/surface/functions/deploy.py +++ b/google-cloud-sdk/lib/surface/functions/deploy.py @@ -88,7 +88,7 @@ def _SourceCodeArgs(parser): 'One of the parameters --source-revision, --source-branch, ' 'or --source-tag can be given to specify the version in the ' 'repository. If none of them are provided, the last revision ' - 'from the master branch is used. If this parameter is given, ' + 'from the main branch is used. If this parameter is given, ' 'the parameter --source is required and describes the path ' 'inside the repository.')) source_version_group = parser.add_mutually_exclusive_group() @@ -102,7 +102,7 @@ def _SourceCodeArgs(parser): help=('The branch that will be used to get the source code of the ' 'function. The most recent revision on this branch will be ' 'used. Can be specified only together with --source-url ' - 'parameter. If not specified defaults to `master`.')) + 'parameter. 
If not specified defaults to `main`.')) source_version_group.add_argument( '--source-tag', help="""\ @@ -296,7 +296,7 @@ def _DeployFunction(self, name, location, args, deploy_method, if args.source_url: messages = self.context['functions_messages'] source_path = args.source_path - source_branch = args.source_branch or 'master' + source_branch = args.source_branch or 'main' function.sourceRepository = messages.SourceRepository( tag=args.source_tag, branch=source_branch, revision=args.source_revision, repositoryUrl=args.source_url, diff --git a/google-cloud-sdk/lib/surface/ml_engine/local/train.py b/google-cloud-sdk/lib/surface/ml_engine/local/train.py index 21e02d4..f0fd33e 100755 --- a/google-cloud-sdk/lib/surface/ml_engine/local/train.py +++ b/google-cloud-sdk/lib/surface/ml_engine/local/train.py @@ -92,7 +92,7 @@ def Run(self, args): retval = local_train.MakeProcess(args.module_name, package_root, args=args.user_args, - task_type='master') + task_type='main') # Don't raise an exception because the users will already see the message. # We want this to mimic calling the script directly as much as possible. self.exit_code = retval diff --git a/google-cloud-sdk/lib/surface/source/repos/clone.py b/google-cloud-sdk/lib/surface/source/repos/clone.py index 0b7fb42..0819c9d 100755 --- a/google-cloud-sdk/lib/surface/source/repos/clone.py +++ b/google-cloud-sdk/lib/surface/source/repos/clone.py @@ -52,7 +52,7 @@ class CloneGA(base.Command): $ gcloud source repos clone default TARGET_DIR $ cd TARGET_DIR ... create/edit files and create one or more commits ... - $ git push origin master + $ git push origin main """), } @@ -125,7 +125,7 @@ class CloneAlpha(base.Command): $ gcloud source repos clone default TARGET_DIR $ cd TARGET_DIR ... create/edit files and create one or more commits ... 
- $ git push origin master + $ git push origin main """), } diff --git a/google-cloud-sdk/lib/surface/sql/instances/create.py b/google-cloud-sdk/lib/surface/sql/instances/create.py index 5742657..8c56401 100755 --- a/google-cloud-sdk/lib/surface/sql/instances/create.py +++ b/google-cloud-sdk/lib/surface/sql/instances/create.py @@ -115,11 +115,11 @@ def Args(parser): 'instance', help='Cloud SQL instance ID.') parser.add_argument( - '--master-instance-name', + '--main-instance-name', required=False, - help='Name of the instance which will act as master in the replication ' + help='Name of the instance which will act as main in the replication ' 'setup. The newly created instance will be a read replica of the ' - 'specified master instance.') + 'specified main instance.') parser.add_argument( '--on-premises-host-port', required=False, diff --git a/google-cloud-sdk/lib/surface/sql/instances/failover.py b/google-cloud-sdk/lib/surface/sql/instances/failover.py index 82e0bd0..fca533b 100755 --- a/google-cloud-sdk/lib/surface/sql/instances/failover.py +++ b/google-cloud-sdk/lib/surface/sql/instances/failover.py @@ -61,7 +61,7 @@ def Run(self, args): console_io.PromptContinue( message='Failover will be initiated. Existing connections to the ' - 'master instance will break and no new connection can be established ' + 'main instance will break and no new connection can be established ' 'during the failover.', default=True, cancel_on_no=True) diff --git a/google-cloud-sdk/lib/third_party/apitools/base/py/batch.py b/google-cloud-sdk/lib/third_party/apitools/base/py/batch.py index 95a8f7a..361b032 100755 --- a/google-cloud-sdk/lib/third_party/apitools/base/py/batch.py +++ b/google-cloud-sdk/lib/third_party/apitools/base/py/batch.py @@ -176,7 +176,7 @@ def Add(self, service, method, request, global_params=None): method_config, request, global_params=global_params, upload_config=upload_config) - # Create the request and add it to our master list. 
+ # Create the request and add it to our main list. api_request = self.ApiCall( http_request, self.retryable_codes, service, method_config) self.api_requests.append(api_request) diff --git a/google-cloud-sdk/lib/third_party/dns/rdataset.py b/google-cloud-sdk/lib/third_party/dns/rdataset.py index f556d22..3b25679 100755 --- a/google-cloud-sdk/lib/third_party/dns/rdataset.py +++ b/google-cloud-sdk/lib/third_party/dns/rdataset.py @@ -169,7 +169,7 @@ def __ne__(self, other): def to_text(self, name=None, origin=None, relativize=True, override_rdclass=None, **kw): - """Convert the rdataset into DNS master file format. + """Convert the rdataset into DNS main file format. @see: L{dns.name.Name.choose_relativity} for more information on how I{origin} and I{relativize} determine the way names diff --git a/google-cloud-sdk/lib/third_party/dns/rdtypes/ANY/SOA.py b/google-cloud-sdk/lib/third_party/dns/rdtypes/ANY/SOA.py index a25a35e..0b1896c 100755 --- a/google-cloud-sdk/lib/third_party/dns/rdtypes/ANY/SOA.py +++ b/google-cloud-sdk/lib/third_party/dns/rdtypes/ANY/SOA.py @@ -22,7 +22,7 @@ class SOA(dns.rdata.Rdata): """SOA record - @ivar mname: the SOA MNAME (master name) field + @ivar mname: the SOA MNAME (main name) field @type mname: dns.name.Name object @ivar rname: the SOA RNAME (responsible name) field @type rname: dns.name.Name object diff --git a/google-cloud-sdk/lib/third_party/dns/rrset.py b/google-cloud-sdk/lib/third_party/dns/rrset.py index 2146817..1d1c32e 100755 --- a/google-cloud-sdk/lib/third_party/dns/rrset.py +++ b/google-cloud-sdk/lib/third_party/dns/rrset.py @@ -84,7 +84,7 @@ def match(self, name, rdclass, rdtype, covers, deleting=None): return True def to_text(self, origin=None, relativize=True, **kw): - """Convert the RRset into DNS master file format. + """Convert the RRset into DNS main file format. 
@see: L{dns.name.Name.choose_relativity} for more information on how I{origin} and I{relativize} determine the way names diff --git a/google-cloud-sdk/lib/third_party/dns/tokenizer.py b/google-cloud-sdk/lib/third_party/dns/tokenizer.py index 4f68a2a..e99097a 100755 --- a/google-cloud-sdk/lib/third_party/dns/tokenizer.py +++ b/google-cloud-sdk/lib/third_party/dns/tokenizer.py @@ -13,7 +13,7 @@ # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -"""Tokenize DNS master file format""" +"""Tokenize DNS main file format""" import cStringIO import sys @@ -47,7 +47,7 @@ class UngetBufferFull(dns.exception.DNSException): pass class Token(object): - """A DNS master file format token. + """A DNS main file format token. @ivar ttype: The token type @type ttype: int @@ -156,7 +156,7 @@ def __getitem__(self, i): raise IndexError class Tokenizer(object): - """A DNS master file format tokenizer. + """A DNS main file format tokenizer. A token is a (type, value) tuple, where I{type} is an int, and I{value} is a string. The valid types are EOF, EOL, WHITESPACE, diff --git a/google-cloud-sdk/lib/third_party/dns/zone.py b/google-cloud-sdk/lib/third_party/dns/zone.py index db5fd5d..6a2603e 100755 --- a/google-cloud-sdk/lib/third_party/dns/zone.py +++ b/google-cloud-sdk/lib/third_party/dns/zone.py @@ -520,8 +520,8 @@ def check_origin(self): raise NoNS -class _MasterReader(object): - """Read a DNS master file +class _MainReader(object): + """Read a DNS main file @ivar tok: The tokenizer @type tok: dns.tokenizer.Tokenizer object @@ -569,7 +569,7 @@ def _eat_line(self): break def _rr_line(self): - """Process one line from a DNS master file.""" + """Process one line from a DNS main file.""" # Name if self.current_origin is None: raise UnknownOrigin @@ -642,7 +642,7 @@ def _rr_line(self): rds.add(rd, ttl) def read(self): - """Read a DNS master file and build a zone object. 
+ """Read a DNS main file and build a zone object. @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @@ -704,7 +704,7 @@ def read(self): filename) self.current_origin = new_origin else: - raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'") + raise dns.exception.SyntaxError("Unknown main file directive '" + u + "'") continue self.tok.unget(token) self._rr_line() @@ -721,12 +721,12 @@ def read(self): def from_text(text, origin = None, rdclass = dns.rdataclass.IN, relativize = True, zone_factory=Zone, filename=None, allow_include=False, check_origin=True): - """Build a zone object from a master file format string. + """Build a zone object from a main file format string. - @param text: the master file format input + @param text: the main file format input @type text: string. @param origin: The origin of the zone; if not specified, the first - $ORIGIN statement in the master file will determine the origin of the + $ORIGIN statement in the main file will determine the origin of the zone. @type origin: dns.name.Name object or string @param rdclass: The zone's rdata class; the default is class IN. @@ -755,7 +755,7 @@ def from_text(text, origin = None, rdclass = dns.rdataclass.IN, if filename is None: filename = '' tok = dns.tokenizer.Tokenizer(text, filename) - reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory, + reader = _MainReader(tok, origin, rdclass, relativize, zone_factory, allow_include=allow_include, check_origin=check_origin) reader.read() @@ -764,12 +764,12 @@ def from_text(text, origin = None, rdclass = dns.rdataclass.IN, def from_file(f, origin = None, rdclass = dns.rdataclass.IN, relativize = True, zone_factory=Zone, filename=None, allow_include=True, check_origin=True): - """Read a master file and build a zone object. + """Read a main file and build a zone object. @param f: file or string. 
If I{f} is a string, it is treated as the name of a file to open. @param origin: The origin of the zone; if not specified, the first - $ORIGIN statement in the master file will determine the origin of the + $ORIGIN statement in the main file will determine the origin of the zone. @type origin: dns.name.Name object or string @param rdclass: The zone's rdata class; the default is class IN. diff --git a/google-cloud-sdk/lib/third_party/dulwich/repo.py b/google-cloud-sdk/lib/third_party/dulwich/repo.py index 12faefc..f069599 100755 --- a/google-cloud-sdk/lib/third_party/dulwich/repo.py +++ b/google-cloud-sdk/lib/third_party/dulwich/repo.py @@ -790,8 +790,8 @@ def clone(self, target_path, mkdir=True, bare=False, b'refs/tags', self.refs.as_dict(b'refs/tags')) try: target.refs.add_if_new( - b'refs/heads/master', - self.refs[b'refs/heads/master']) + b'refs/heads/main', + self.refs[b'refs/heads/main']) except KeyError: pass @@ -876,7 +876,7 @@ def _init_maybe_bare(cls, path, bare): os.mkdir(os.path.join(path, *d)) DiskObjectStore.init(os.path.join(path, OBJECTDIR)) ret = cls(path) - ret.refs.set_symbolic_ref(b'HEAD', b"refs/heads/master") + ret.refs.set_symbolic_ref(b'HEAD', b"refs/heads/main") ret._init_files(bare) return ret diff --git a/google-cloud-sdk/lib/third_party/pkg_resources/__init__.py b/google-cloud-sdk/lib/third_party/pkg_resources/__init__.py index e33de0e..a389aa2 100755 --- a/google-cloud-sdk/lib/third_party/pkg_resources/__init__.py +++ b/google-cloud-sdk/lib/third_party/pkg_resources/__init__.py @@ -638,9 +638,9 @@ def __init__(self, entries=None): self.add_entry(entry) @classmethod - def _build_master(cls): + def _build_main(cls): """ - Prepare the master working set. + Prepare the main working set. 
""" ws = cls() try: @@ -3028,9 +3028,9 @@ def _initialize(g=globals()): @_call_aside -def _initialize_master_working_set(): +def _initialize_main_working_set(): """ - Prepare the master working set and make the ``require()`` + Prepare the main working set and make the ``require()`` API available. This function has explicit effects on the global state @@ -3040,7 +3040,7 @@ def _initialize_master_working_set(): Invocation by other packages is unsupported and done at their own risk. """ - working_set = WorkingSet._build_master() + working_set = WorkingSet._build_main() _declare_state('object', working_set=working_set) require = working_set.require diff --git a/google-cloud-sdk/lib/third_party/pygments/lexers/_phpbuiltins.py b/google-cloud-sdk/lib/third_party/pygments/lexers/_phpbuiltins.py index cd1608f..adeb013 100755 --- a/google-cloud-sdk/lib/third_party/pygments/lexers/_phpbuiltins.py +++ b/google-cloud-sdk/lib/third_party/pygments/lexers/_phpbuiltins.py @@ -57,15 +57,15 @@ 'mysqli_bind_result', 'mysqli_client_encoding', 'mysqli_connect', - 'mysqli_disable_reads_from_master', + 'mysqli_disable_reads_from_main', 'mysqli_disable_rpl_parse', - 'mysqli_enable_reads_from_master', + 'mysqli_enable_reads_from_main', 'mysqli_enable_rpl_parse', 'mysqli_escape_string', 'mysqli_execute', 'mysqli_fetch', 'mysqli_get_metadata', - 'mysqli_master_query', + 'mysqli_main_query', 'mysqli_param_count', 'mysqli_report', 'mysqli_rpl_parse_enabled', @@ -74,7 +74,7 @@ 'mysqli_send_long_data', 'mysqli_send_query', 'mysqli_set_opt', - 'mysqli_slave_query'], + 'mysqli_subordinate_query'], 'Apache': ['apache_child_terminate', 'apache_get_modules', 'apache_get_version', @@ -1536,11 +1536,11 @@ 'maxdb_connect', 'maxdb_data_seek', 'maxdb_debug', - 'maxdb_disable_reads_from_master', + 'maxdb_disable_reads_from_main', 'maxdb_disable_rpl_parse', 'maxdb_dump_debug_info', 'maxdb_embedded_connect', - 'maxdb_enable_reads_from_master', + 'maxdb_enable_reads_from_main', 'maxdb_enable_rpl_parse', 
'maxdb_errno', 'maxdb_error', @@ -1570,7 +1570,7 @@ 'maxdb_init', 'maxdb_insert_id', 'maxdb_kill', - 'maxdb_master_query', + 'maxdb_master_query', 'maxdb_more_results', 'maxdb_multi_query', 'maxdb_next_result', @@ -3422,7 +3422,7 @@ 'yp_errno', 'yp_first', 'yp_get_default_domain', - 'yp_master', + 'yp_master', 'yp_match', 'yp_next', 'yp_order'], diff --git a/google-cloud-sdk/lib/third_party/setuptools/command/easy_install.py b/google-cloud-sdk/lib/third_party/setuptools/command/easy_install.py index d3eabfc..4d56294 100755 --- a/google-cloud-sdk/lib/third_party/setuptools/command/easy_install.py +++ b/google-cloud-sdk/lib/third_party/setuptools/command/easy_install.py @@ -1733,7 +1733,7 @@ def update_dist_caches(dist_path, fix_zipimporter_caches): # There are several other known sources of stale zipimport.zipimporter # instances that we do not clear here, but might if ever given a reason to # do so: - # * Global setuptools pkg_resources.working_set (a.k.a. 'master working + # * Global setuptools pkg_resources.working_set (a.k.a. 'main working # set') may contain distributions which may in turn contain their # zipimport.zipimporter loaders. # * Several zipimport.zipimporter loaders held by local variables further diff --git a/google-cloud-sdk/platform/gsutil/gslib/addlhelp/naming.py b/google-cloud-sdk/platform/gsutil/gslib/addlhelp/naming.py index 8f278cd..9b10921 100755 --- a/google-cloud-sdk/platform/gsutil/gslib/addlhelp/naming.py +++ b/google-cloud-sdk/platform/gsutil/gslib/addlhelp/naming.py @@ -94,8 +94,8 @@ user who creates the bucket, so the user who creates the bucket must also be verified as an owner or manager of the domain. - To verify as the owner or manager of a domain, use the Google Webmaster - Tools verification process. The Webmaster Tools verification process + To verify as the owner or manager of a domain, use the Google Webmaster + Tools verification process. 
The Webmaster Tools verification process provides three methods for verifying an owner or manager of a domain: 1. Adding a special Meta tag to a site's homepage. diff --git a/google-cloud-sdk/platform/gsutil/gslib/commands/notification.py b/google-cloud-sdk/platform/gsutil/gslib/commands/notification.py index 848122d..620690d 100755 --- a/google-cloud-sdk/platform/gsutil/gslib/commands/notification.py +++ b/google-cloud-sdk/platform/gsutil/gslib/commands/notification.py @@ -127,7 +127,7 @@ Service Account authentication and that the Service Account's project is authorized for the application URL. Notification endpoint URLs must also be whitelisted in your Cloud Console project. To do that, the domain must also be -verified using Google Webmaster Tools. For instructions, please see: +verified using Google Webmaster Tools. For instructions, please see: https://cloud.google.com/storage/docs/object-change-notification#_Authorization """ diff --git a/google-cloud-sdk/platform/gsutil/gslib/ui_controller.py b/google-cloud-sdk/platform/gsutil/gslib/ui_controller.py index 1bb4f1d..5d52494 100755 --- a/google-cloud-sdk/platform/gsutil/gslib/ui_controller.py +++ b/google-cloud-sdk/platform/gsutil/gslib/ui_controller.py @@ -1044,7 +1044,7 @@ def Call(self, status_message, stream, cur_time=None): class MainThreadUIQueue(object): - """Handles status display and processing in the main thread / master process. + """Handles status display and processing in the main thread / main process. 
This class emulates a queue to cover main-thread activity before or after Apply, as well as for the single-threaded, single-process case, i.e., diff --git a/google-cloud-sdk/platform/gsutil/third_party/apitools/apitools/base/py/batch.py b/google-cloud-sdk/platform/gsutil/third_party/apitools/apitools/base/py/batch.py index 7efe6dd..abca0ff 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/apitools/apitools/base/py/batch.py +++ b/google-cloud-sdk/platform/gsutil/third_party/apitools/apitools/base/py/batch.py @@ -174,7 +174,7 @@ def Add(self, service, method, request, global_params=None): method_config, request, global_params=global_params, upload_config=upload_config) - # Create the request and add it to our master list. + # Create the request and add it to our main list. api_request = self.ApiCall( http_request, self.retryable_codes, service, method_config) self.api_requests.append(api_request) diff --git a/google-cloud-sdk/platform/gsutil/third_party/apitools/run_pylint.py b/google-cloud-sdk/platform/gsutil/third_party/apitools/run_pylint.py index fd55463..3103aa1 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/apitools/run_pylint.py +++ b/google-cloud-sdk/platform/gsutil/third_party/apitools/run_pylint.py @@ -111,9 +111,9 @@ def get_files_for_linting(allow_limited=True, diff_base=None): uses a specific commit or branch (a so-called diff base) to compare against for changed files. (This requires ``allow_limited=True``.) - To speed up linting on Travis pull requests against master, we manually - set the diff base to origin/master. We don't do this on non-pull requests - since origin/master will be equivalent to the currently checked out code. + To speed up linting on Travis pull requests against master, we manually + set the diff base to origin/master. We don't do this on non-pull requests + since origin/master will be equivalent to the currently checked out code. 
One could potentially use ${TRAVIS_COMMIT_RANGE} to find a diff base but this value is not dependable. @@ -126,14 +126,14 @@ def get_files_for_linting(allow_limited=True, diff_base=None): linted. """ if os.getenv('TRAVIS') == 'true': - # In travis, don't default to master. + # In travis, don't default to master. diff_base = None - if (os.getenv('TRAVIS_BRANCH') == 'master' and + if (os.getenv('TRAVIS_BRANCH') == 'master' and os.getenv('TRAVIS_PULL_REQUEST') != 'false'): - # In the case of a pull request into master, we want to - # diff against HEAD in master. - diff_base = 'origin/master' + # In the case of a pull request into master, we want to + # diff against HEAD in master. + diff_base = 'origin/master' if diff_base is not None and allow_limited: result = subprocess.check_output(['git', 'diff', '--name-only', diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/elastictranscoder/layer1.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/elastictranscoder/layer1.py index 0f4dc9c..3a5ce45 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/elastictranscoder/layer1.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/elastictranscoder/layer1.py @@ -118,10 +118,10 @@ def create_job(self, pipeline_id=None, input_name=None, output=None, :type playlists: list :param playlists: If you specify a preset in `PresetId` for which the value of `Container` is ts (MPEG-TS), Playlists contains - information about the master playlists that you want Elastic + information about the master playlists that you want Elastic Transcoder to create. - We recommend that you create only one master playlist. The maximum - number of master playlists in a job is 30. + We recommend that you create only one master playlist. The maximum + number of master playlists in a job is 30. 
""" uri = '/2012-09-25/jobs' diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/connection.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/connection.py index 7afc4e0..870a9bd 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/connection.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/connection.py @@ -408,8 +408,8 @@ def modify_instance_groups(self, instance_group_ids, new_sizes): def run_jobflow(self, name, log_uri=None, ec2_keyname=None, availability_zone=None, - master_instance_type='m1.small', - slave_instance_type='m1.small', num_instances=1, + main_instance_type='m1.small', + subordinate_instance_type='m1.small', num_instances=1, action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False, enable_debugging=False, hadoop_version=None, @@ -436,11 +436,11 @@ def run_jobflow(self, name, log_uri=None, ec2_keyname=None, :type availability_zone: str :param availability_zone: EC2 availability zone of the cluster - :type master_instance_type: str - :param master_instance_type: EC2 instance type of the master + :type main_instance_type: str + :param main_instance_type: EC2 instance type of the main - :type slave_instance_type: str - :param slave_instance_type: EC2 instance type of the slave nodes + :type subordinate_instance_type: str + :param subordinate_instance_type: EC2 instance type of the subordinate nodes :type num_instances: int :param num_instances: Number of instances in the Hadoop cluster @@ -471,7 +471,7 @@ def run_jobflow(self, name, log_uri=None, ec2_keyname=None, :param instance_groups: Optional list of instance groups to use when creating this job. NB: When provided, this argument supersedes num_instances - and master/slave_instance_type. + and main/subordinate_instance_type. 
:type ami_version: str :param ami_version: Amazon Machine Image (AMI) version to use @@ -526,15 +526,15 @@ def run_jobflow(self, name, log_uri=None, ec2_keyname=None, params.update(common_params) # NB: according to the AWS API's error message, we must - # "configure instances either using instance count, master and - # slave instance type or instance groups but not both." + # "configure instances either using instance count, main and + # subordinate instance type or instance groups but not both." # # Thus we switch here on the truthiness of instance_groups. if not instance_groups: # Instance args (the common case) instance_params = self._build_instance_count_and_type_args( - master_instance_type, - slave_instance_type, + main_instance_type, + subordinate_instance_type, num_instances) params.update(instance_params) else: @@ -721,15 +721,15 @@ def _build_instance_common_args(self, ec2_keyname, availability_zone, return params - def _build_instance_count_and_type_args(self, master_instance_type, - slave_instance_type, num_instances): + def _build_instance_count_and_type_args(self, main_instance_type, + subordinate_instance_type, num_instances): """ - Takes a master instance type (string), a slave instance type + Takes a main instance type (string), a subordinate instance type (string), and a number of instances. Returns a comparable dict for use in making a RunJobFlow request. 
"""
- params = {'Instances.MasterInstanceType': master_instance_type,
- 'Instances.SlaveInstanceType': slave_instance_type,
+ params = {'Instances.MasterInstanceType': main_instance_type,
+ 'Instances.SlaveInstanceType': subordinate_instance_type,
'Instances.InstanceCount': num_instances}
return params
diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/step.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/step.py
index de6835f..35cb8bb 100755
--- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/step.py
+++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/step.py
@@ -132,7 +132,7 @@ def __init__(self, name, mapper, reducer=None, combiner=None,
:param output: The output uri
:type jar: str
:param jar: The hadoop streaming jar. This can be either a local
- path on the master node, or an s3:// URI.
+ path on the main node, or an s3:// URI.
"""
self.name = name
self.mapper = mapper
diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/kms/layer1.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/kms/layer1.py
index 88ea2e0..3fbc059 100755
--- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/kms/layer1.py
+++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/kms/layer1.py
@@ -130,7 +130,7 @@ def _required_auth_capability(self):
def create_alias(self, alias_name, target_key_id):
"""
- Creates a display name for a customer master key. An alias can
+ Creates a display name for a customer main key. An alias can
be used to identify a key and should be unique. The console
enforces a one-to-one mapping between the alias and a key. An
alias name can contain only alphanumeric characters, forward
@@ -174,7 +174,7 @@ def create_grant(self, key_id, grantee_principal,
#. RevokeGrant
:type key_id: string
- :param key_id: A unique key identifier for a customer master key. This
+ :param key_id: A unique key identifier for a customer main key.
This value can be a globally unique identifier, an ARN, or an alias. :type grantee_principal: string @@ -222,7 +222,7 @@ def create_grant(self, key_id, grantee_principal, def create_key(self, policy=None, description=None, key_usage=None): """ - Creates a customer master key. Customer master keys can be + Creates a customer main key. Customer main keys can be used to encrypt small amounts of data (less than 4K) directly, but they are most commonly used to encrypt or envelope data keys that are then used to encrypt customer data. For more @@ -306,10 +306,10 @@ def delete_alias(self, alias_name): def describe_key(self, key_id): """ Provides detailed information about the specified customer - master key. + main key. :type key_id: string - :param key_id: Unique identifier of the customer master key to be + :param key_id: Unique identifier of the customer main key to be described. This can be an ARN, an alias, or a globally unique identifier. @@ -323,7 +323,7 @@ def disable_key(self, key_id): Marks a key as disabled, thereby preventing its use. :type key_id: string - :param key_id: Unique identifier of the customer master key to be + :param key_id: Unique identifier of the customer main key to be disabled. This can be an ARN, an alias, or a globally unique identifier. @@ -337,7 +337,7 @@ def disable_key_rotation(self, key_id): Disables rotation of the specified key. :type key_id: string - :param key_id: Unique identifier of the customer master key for which + :param key_id: Unique identifier of the customer main key for which rotation is to be disabled. This can be an ARN, an alias, or a globally unique identifier. @@ -352,7 +352,7 @@ def enable_key(self, key_id): have up to 25 enabled keys at one time. :type key_id: string - :param key_id: Unique identifier of the customer master key to be + :param key_id: Unique identifier of the customer main key to be enabled. This can be an ARN, an alias, or a globally unique identifier. 
@@ -363,10 +363,10 @@ def enable_key(self, key_id): def enable_key_rotation(self, key_id): """ - Enables rotation of the specified customer master key. + Enables rotation of the specified customer main key. :type key_id: string - :param key_id: Unique identifier of the customer master key for which + :param key_id: Unique identifier of the customer main key for which rotation is to be enabled. This can be an ARN, an alias, or a globally unique identifier. @@ -378,11 +378,11 @@ def enable_key_rotation(self, key_id): def encrypt(self, key_id, plaintext, encryption_context=None, grant_tokens=None): """ - Encrypts plaintext into ciphertext by using a customer master + Encrypts plaintext into ciphertext by using a customer main key. :type key_id: string - :param key_id: Unique identifier of the customer master. This can be an + :param key_id: Unique identifier of the customer main. This can be an ARN, an alias, or the Key ID. :type plaintext: blob @@ -420,7 +420,7 @@ def generate_data_key(self, key_id, encryption_context=None, grant_tokens=None): """ Generates a secure data key. Data keys are used to encrypt and - decrypt data. They are wrapped by customer master keys. + decrypt data. They are wrapped by customer main keys. :type key_id: string :param key_id: Unique identifier of the key. This can be an ARN, an @@ -472,7 +472,7 @@ def generate_data_key_without_plaintext(self, key_id, number_of_bytes=None, grant_tokens=None): """ - Returns a key wrapped by a customer master key without the + Returns a key wrapped by a customer main key without the plaintext copy of that key. To retrieve the plaintext, see GenerateDataKey. @@ -651,7 +651,7 @@ def list_key_policies(self, key_id, limit=None, marker=None): def list_keys(self, limit=None, marker=None): """ - Lists the customer master keys. + Lists the customer main keys. 
:type limit: integer :param limit: Specify this parameter only when paginating results to @@ -702,7 +702,7 @@ def re_encrypt(self, ciphertext_blob, destination_key_id, source_encryption_context=None, destination_encryption_context=None, grant_tokens=None): """ - Encrypts data on the server side with a new customer master + Encrypts data on the server side with a new customer main key without exposing the plaintext of the data on the client side. The data is first decrypted and then encrypted. This operation can also be used to change the encryption context of diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/opsworks/layer1.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/opsworks/layer1.py index 8894d1c..e007856 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/opsworks/layer1.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/opsworks/layer1.py @@ -2124,7 +2124,7 @@ def register_rds_db_instance(self, stack_id, rds_db_instance_arn, :param rds_db_instance_arn: The Amazon RDS instance's ARN. :type db_user: string - :param db_user: The database's master user name. + :param db_user: The database's main user name. :type db_password: string :param db_password: The database password. @@ -2785,7 +2785,7 @@ def update_rds_db_instance(self, rds_db_instance_arn, db_user=None, :param rds_db_instance_arn: The Amazon RDS instance's ARN. :type db_user: string - :param db_user: The master user name. + :param db_user: The main user name. :type db_password: string :param db_password: The database password. 
diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/pyami/bootstrap.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/pyami/bootstrap.py index 82c2822..21fafd2 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/pyami/bootstrap.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/pyami/bootstrap.py @@ -88,7 +88,7 @@ def load_boto(self): if update.find(':') >= 0: method, version = update.split(':') else: - version = 'master' + version = 'main' self.run('git checkout %s' % version, cwd=location) else: # first remove the symlink needed when running from subversion diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/__init__.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/__init__.py index 15c838b..901b6e1 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/__init__.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/__init__.py @@ -139,8 +139,8 @@ def create_dbinstance(self, id, allocated_storage, instance_class, - master_username, - master_password, + main_username, + main_password, port=3306, engine='MySQL5.1', db_name=None, @@ -169,8 +169,8 @@ def create_dbinstance(self, # security_groups should be db_security_groups according to API docs but has been left # security_groups for backwards compatibility # - # master_password should be master_user_password according to API docs but has been left - # master_password for backwards compatibility + # main_password should be main_user_password according to API docs but has been left + # main_password for backwards compatibility # # instance_class should be db_instance_class according to API docs but has been left # instance_class for backwards compatibility @@ -223,8 +223,8 @@ def create_dbinstance(self, * sqlserver-web * postgres - :type master_username: str - :param master_username: Name of master user for the DBInstance. 
+ :type main_username: str
+ :param main_username: Name of main user for the DBInstance.
* MySQL must be;
- 1--16 alphanumeric characters
@@ -241,8 +241,8 @@ def create_dbinstance(self,
- first character must be a letter
- cannot be a reserver SQL Server word
- :type master_password: str
- :param master_password: Password of master user for the DBInstance.
+ :type main_password: str
+ :param main_password: Password of main user for the DBInstance.
* MySQL must be 8--41 alphanumeric characters
@@ -399,8 +399,8 @@ def create_dbinstance(self,
# engine => Engine
# engine_version => EngineVersion
# license_model => LicenseModel
- # master_username => MasterUsername
- # master_user_password => MasterUserPassword
+ # main_username => MasterUsername
+ # main_user_password => MasterUserPassword
# multi_az => MultiAZ
# option_group_name => OptionGroupName
# port => Port
@@ -424,8 +424,8 @@ def create_dbinstance(self,
'EngineVersion': engine_version,
'Iops': iops,
'LicenseModel': license_model,
- 'MasterUsername': master_username,
- 'MasterUserPassword': master_password,
+ 'MasterUsername': main_username,
+ 'MasterUserPassword': main_password,
'MultiAZ': str(multi_az).lower() if multi_az else None,
'OptionGroupName': option_group_name,
'Port': port,
@@ -563,7 +563,7 @@ def promote_read_replica(self, id,
def modify_dbinstance(self, id, param_group=None, security_groups=None,
preferred_maintenance_window=None,
- master_password=None, allocated_storage=None,
+ main_password=None, allocated_storage=None,
instance_class=None,
backup_retention_period=None,
preferred_backup_window=None,
@@ -594,8 +594,8 @@ def modify_dbinstance(self, id, param_group=None, security_groups=None,
occur. Default is Sun:05:00-Sun:09:00
- :type master_password: str
- :param master_password: Password of master user for the DBInstance.
+ :type main_password: str
+ :param main_password: Password of main user for the DBInstance.
Must be 4-15 alphanumeric characters.
:type allocated_storage: int
@@ -681,8 +681,8 @@ def modify_dbinstance(self, id, param_group=None, security_groups=None,
self.build_list_params(params, l, 'VpcSecurityGroupIds.member')
if preferred_maintenance_window:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
- if master_password:
- params['MasterUserPassword'] = master_password
+ if main_password:
+ params['MasterUserPassword'] = main_password
if allocated_storage:
params['AllocatedStorage'] = allocated_storage
if instance_class:
diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbinstance.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbinstance.py
index 6a63851..e0acf27 100755
--- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbinstance.py
+++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbinstance.py
@@ -47,7 +47,7 @@ class DBInstance(object):
in status "available".
:ivar instance_class: Contains the name of the compute and memory
capacity class of the DB Instance.
- :ivar master_username: The username that is set as master username
+ :ivar main_username: The username that is set as main username
at creation time.
:ivar parameter_groups: Provides the list of DB Parameter Groups
applied to this DB Instance.
@@ -98,7 +98,7 @@ def __init__(self, connection=None, id=None):
self.auto_minor_version_upgrade = None
self.endpoint = None
self.instance_class = None
- self.master_username = None
+ self.main_username = None
self.parameter_groups = []
self.security_groups = []
self.read_replica_dbinstance_identifiers = []
@@ -172,8 +172,8 @@ def endElement(self, name, value, connection):
self.auto_minor_version_upgrade = value.lower() == 'true'
elif name == 'DBInstanceClass':
self.instance_class = value
- elif name == 'MasterUsername':
- self.master_username = value
+ elif name == 'MasterUsername':
+ self.main_username = value
elif name == 'Port':
if self._in_endpoint:
self._port = int(value)
@@ -293,7 +293,7 @@ def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
def modify(self, param_group=None, security_groups=None,
preferred_maintenance_window=None,
- master_password=None, allocated_storage=None,
+ main_password=None, allocated_storage=None,
instance_class=None,
backup_retention_period=None,
preferred_backup_window=None,
@@ -318,8 +318,8 @@ def modify(self, param_group=None, security_groups=None,
UTC) during which maintenance can occur. Default is
Sun:05:00-Sun:09:00
- :type master_password: str
- :param master_password: Password of master user for the DBInstance.
+ :type main_password: str
+ :param main_password: Password of main user for the DBInstance.
Must be 4-15 alphanumeric characters.
:type allocated_storage: int @@ -386,7 +386,7 @@ def modify(self, param_group=None, security_groups=None, param_group, security_groups, preferred_maintenance_window, - master_password, + main_password, allocated_storage, instance_class, backup_retention_period, diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbsnapshot.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbsnapshot.py index 16d8125..35ecbd5 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbsnapshot.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds/dbsnapshot.py @@ -34,10 +34,10 @@ class DBSnapshot(object): :ivar id: Specifies the identifier for the DB Snapshot (DBSnapshotIdentifier) :ivar instance_create_time: Specifies the time (UTC) when the snapshot was taken :ivar instance_id: Specifies the the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from (DBInstanceIdentifier) - :ivar master_username: Provides the master username for the DB Instance + :ivar main_username: Provides the main username for the DB Instance :ivar port: Specifies the port that the database engine was listening on at the time of the snapshot :ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken - :ivar status: Specifies the status of this DB Snapshot. Possible values are [ available, backing-up, creating, deleted, deleting, failed, modifying, rebooting, resetting-master-credentials ] + :ivar status: Specifies the status of this DB Snapshot. Possible values are [ available, backing-up, creating, deleted, deleting, failed, modifying, rebooting, resetting-main-credentials ] :ivar iops: Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot. :ivar option_group_name: Provides the option group name for the DB snapshot. :ivar percent_progress: The percentage of the estimated data that has been transferred. 
@@ -56,7 +56,7 @@ def __init__(self, connection=None, id=None):
self.port = None
self.status = None
self.availability_zone = None
- self.master_username = None
+ self.main_username = None
self.allocated_storage = None
self.instance_id = None
self.availability_zone = None
@@ -93,8 +93,8 @@ def endElement(self, name, value, connection):
self.status = value
elif name == 'AvailabilityZone':
self.availability_zone = value
- elif name == 'MasterUsername':
- self.master_username = value
+ elif name == 'MasterUsername':
+ self.main_username = value
elif name == 'AllocatedStorage':
self.allocated_storage = int(value)
elif name == 'SnapshotTime':
diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds2/layer1.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds2/layer1.py
index bbe5a77..9ad772f 100755
--- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds2/layer1.py
+++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/rds2/layer1.py
@@ -321,8 +321,8 @@ def copy_db_snapshot(self, source_db_snapshot_identifier,
path='/', params=params)
def create_db_instance(self, db_instance_identifier, allocated_storage,
- db_instance_class, engine, master_username,
- master_user_password, db_name=None,
+ db_instance_class, engine, main_username,
+ main_user_password, db_name=None,
db_security_groups=None,
vpc_security_group_ids=None,
availability_zone=None, db_subnet_group_name=None,
@@ -417,9 +417,9 @@ def create_db_instance(self, db_instance_identifier, allocated_storage,
Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` |
`sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web`
- :type master_username: string
- :param master_username:
- The name of master user for the client DB instance.
+ :type main_username: string
+ :param main_username:
+ The name of main user for the client DB instance.
**MySQL**
@@ -452,8 +452,8 @@ def create_db_instance(self, db_instance_identifier, allocated_storage,
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
- :type master_user_password: string
- :param master_user_password: The password for the master database user.
+ :type main_user_password: string
+ :param main_user_password: The password for the main database user.
Can be any printable ASCII character except "/", '"', or "@".
Type: String
@@ -536,7 +536,7 @@ def create_db_instance(self, db_instance_identifier, allocated_storage,
+ Must be a value from 0 to 8
- + Cannot be set to 0 if the DB instance is a master instance with read
+ + Cannot be set to 0 if the DB instance is a main instance with read
replicas
:type preferred_backup_window: string
@@ -656,8 +656,8 @@ def create_db_instance(self, db_instance_identifier, allocated_storage,
'AllocatedStorage': allocated_storage,
'DBInstanceClass': db_instance_class,
'Engine': engine,
- 'MasterUsername': master_username,
- 'MasterUserPassword': master_user_password,
+ 'MasterUsername': main_username,
+ 'MasterUserPassword': main_user_password,
}
if db_name is not None:
params['DBName'] = db_name
@@ -2488,7 +2488,7 @@ def modify_db_instance(self, db_instance_identifier,
allocated_storage=None,
db_instance_class=None,
db_security_groups=None, vpc_security_group_ids=None,
- apply_immediately=None, master_user_password=None,
+ apply_immediately=None, main_user_password=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None,
@@ -2616,14 +2616,14 @@ def modify_db_instance(self, db_instance_identifier,
Default: `False`
- :type master_user_password: string
- :param master_user_password:
- The new password for the DB instance master user. Can be any printable
+ :type main_user_password: string
+ :param main_user_password:
+ The new password for the DB instance main user. Can be any printable
ASCII character except "/", '"', or "@".
Changing this parameter does not result in an outage and the change
is asynchronously applied as soon as possible. Between the time of the
- request and the completion of the request, the `MasterUserPassword`
+ request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the operation
response.
@@ -2634,7 +2634,7 @@ def modify_db_instance(self, db_instance_identifier,
characters (SQL Server).
Amazon RDS API actions never return the password, so this action
- provides a way to regain access to a master instance user if the
+ provides a way to regain access to a main instance user if the
password is lost.
:type db_parameter_group_name: string
@@ -2668,7 +2668,7 @@ def modify_db_instance(self, db_instance_identifier,
+ Must be a value from 0 to 8
- + Cannot be set to 0 if the DB instance is a master instance with read
+ + Cannot be set to 0 if the DB instance is a main instance with read
replicas or if the DB instance is a read replica
:type preferred_backup_window: string
@@ -2820,8 +2820,8 @@ def modify_db_instance(self, db_instance_identifier,
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
- if master_user_password is not None:
- params['MasterUserPassword'] = master_user_password
+ if main_user_password is not None:
+ params['MasterUserPassword'] = main_user_password
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/redshift/layer1.py b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/redshift/layer1.py
index be1529f..4639041 100755
--- a/google-cloud-sdk/platform/gsutil/third_party/boto/boto/redshift/layer1.py
+++ b/google-cloud-sdk/platform/gsutil/third_party/boto/boto/redshift/layer1.py
@@ -310,8 +310,8 @@ def copy_cluster_snapshot(self, source_snapshot_identifier,
verb='POST',
path='/',
params=params) - def create_cluster(self, cluster_identifier, node_type, master_username, - master_user_password, db_name=None, cluster_type=None, + def create_cluster(self, cluster_identifier, node_type, main_username, + main_user_password, db_name=None, cluster_type=None, cluster_security_groups=None, vpc_security_group_ids=None, cluster_subnet_group_name=None, @@ -391,9 +391,9 @@ def create_cluster(self, cluster_identifier, node_type, master_username, Valid Values: `dw1.xlarge` | `dw1.8xlarge` | `dw2.large` | `dw2.8xlarge`. - :type master_username: string - :param master_username: - The user name associated with the master user account for the cluster + :type main_username: string + :param main_username: + The user name associated with the main user account for the cluster that is being created. Constraints: @@ -404,9 +404,9 @@ def create_cluster(self, cluster_identifier, node_type, master_username, + Cannot be a reserved word. A list of reserved words can be found in `Reserved Words`_ in the Amazon Redshift Database Developer Guide. - :type master_user_password: string - :param master_user_password: - The password associated with the master user account for the cluster + :type main_user_password: string + :param main_user_password: + The password associated with the main user account for the cluster that is being created. 
Constraints:
@@ -573,8 +573,8 @@ def create_cluster(self, cluster_identifier, node_type, master_username,
params = {
'ClusterIdentifier': cluster_identifier,
'NodeType': node_type,
- 'MasterUsername': master_username,
- 'MasterUserPassword': master_user_password,
+ 'MasterUsername': main_username,
+ 'MasterUserPassword': main_user_password,
}
if db_name is not None:
params['DBName'] = db_name
@@ -2253,7 +2253,7 @@ def modify_cluster(self, cluster_identifier, cluster_type=None,
node_type=None, number_of_nodes=None,
cluster_security_groups=None,
vpc_security_group_ids=None,
- master_user_password=None,
+ main_user_password=None,
cluster_parameter_group_name=None,
automated_snapshot_retention_period=None,
preferred_maintenance_window=None,
@@ -2264,7 +2264,7 @@ def modify_cluster(self, cluster_identifier, cluster_type=None,
"""
Modifies the settings for a cluster. For example, you can add
another security or parameter group, update the preferred
- maintenance window, or change the master user password.
+ maintenance window, or change the main user password.
Resetting a cluster password or modifying the security groups
associated with a cluster do not need a reboot. However,
modifying a parameter group requires a reboot for parameters
@@ -2345,11 +2345,11 @@ def modify_cluster(self, cluster_identifier, cluster_type=None,
:param vpc_security_group_ids: A list of virtual private cloud (VPC)
security groups to be associated with the cluster.
- :type master_user_password: string
- :param master_user_password:
- The new password for the cluster master user. This change is
+ :type main_user_password: string
+ :param main_user_password:
+ The new password for the cluster main user. This change is
asynchronously applied as soon as possible. Between the time of the
- request and the completion of the request, the `MasterUserPassword`
+ request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the operation
response.
@@ -2464,8 +2464,8 @@ def modify_cluster(self, cluster_identifier, cluster_type=None, self.build_list_params(params, vpc_security_group_ids, 'VpcSecurityGroupIds.member') - if master_user_password is not None: - params['MasterUserPassword'] = master_user_password + if main_user_password is not None: + params['MainUserPassword'] = main_user_password if cluster_parameter_group_name is not None: params['ClusterParameterGroupName'] = cluster_parameter_group_name if automated_snapshot_retention_period is not None: diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds/test_promote_modify.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds/test_promote_modify.py index 20963ed..6d417e6 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds/test_promote_modify.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds/test_promote_modify.py @@ -22,38 +22,38 @@ class PromoteReadReplicaTest(unittest.TestCase): def setUp(self): self.conn = RDSConnection() - self.masterDB_name = "boto-db-%s" % str(int(time.time())) - self.replicaDB_name = "replica-%s" % self.masterDB_name - self.renamedDB_name = "renamed-replica-%s" % self.masterDB_name + self.mainDB_name = "boto-db-%s" % str(int(time.time())) + self.replicaDB_name = "replica-%s" % self.mainDB_name + self.renamedDB_name = "renamed-replica-%s" % self.mainDB_name def tearDown(self): instances = self.conn.get_all_dbinstances() - for db in [self.masterDB_name, self.replicaDB_name, self.renamedDB_name]: + for db in [self.mainDB_name, self.replicaDB_name, self.renamedDB_name]: for i in instances: if i.id == db: self.conn.delete_dbinstance(db, skip_final_snapshot=True) def test_promote(self): print '--- running RDS promotion & renaming tests ---' - self.masterDB = self.conn.create_dbinstance(self.masterDB_name, 5, 'db.t1.micro', 'root', 'bototestpw') + self.mainDB = self.conn.create_dbinstance(self.mainDB_name, 5, 'db.t1.micro', 
'root', 'bototestpw') - # Wait up to 15 minutes for the masterDB to become available - print '--- waiting for "%s" to become available ---' % self.masterDB_name + # Wait up to 15 minutes for the mainDB to become available + print '--- waiting for "%s" to become available ---' % self.mainDB_name wait_timeout = time.time() + (15 * 60) time.sleep(60) - instances = self.conn.get_all_dbinstances(self.masterDB_name) + instances = self.conn.get_all_dbinstances(self.mainDB_name) inst = instances[0] while wait_timeout > time.time() and inst.status != 'available': time.sleep(15) - instances = self.conn.get_all_dbinstances(self.masterDB_name) + instances = self.conn.get_all_dbinstances(self.mainDB_name) inst = instances[0] self.assertTrue(inst.status == 'available') - self.replicaDB = self.conn.create_dbinstance_read_replica(self.replicaDB_name, self.masterDB_name) + self.replicaDB = self.conn.create_dbinstance_read_replica(self.replicaDB_name, self.mainDB_name) # Wait up to 15 minutes for the replicaDB to become available print '--- waiting for "%s" to become available ---' % self.replicaDB_name @@ -92,8 +92,8 @@ def test_promote(self): self.assertTrue(inst.status == 'available') self.assertFalse(inst.status_infos) - # Verify that the master no longer has any read replicas - instances = self.conn.get_all_dbinstances(self.masterDB_name) + # Verify that the main no longer has any read replicas + instances = self.conn.get_all_dbinstances(self.mainDB_name) inst = instances[0] self.assertFalse(inst.read_replica_dbinstance_identifiers) @@ -101,7 +101,7 @@ def test_promote(self): self.renamedDB = self.conn.modify_dbinstance(self.replicaDB_name, new_instance_id=self.renamedDB_name, apply_immediately=True) - # Wait up to 15 minutes for the masterDB to become available + # Wait up to 15 minutes for the mainDB to become available print '--- waiting for "%s" to exist ---' % self.renamedDB_name wait_timeout = time.time() + (15 * 60) diff --git 
a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds2/test_connection.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds2/test_connection.py index 82d8193..7830636 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds2/test_connection.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds2/test_connection.py @@ -44,8 +44,8 @@ def test_integration(self): allocated_storage=5, db_instance_class='db.t1.micro', engine='postgres', - master_username='bototestuser', - master_user_password='testtestt3st', + main_username='bototestuser', + main_user_password='testtestt3st', # Try to limit the impact & test options. multi_az=False, backup_retention_period=0 diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/redshift/test_layer1.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/redshift/test_layer1.py index 490618e..6168508 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/redshift/test_layer1.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/redshift/test_layer1.py @@ -36,8 +36,8 @@ def setUp(self): self.api = RedshiftConnection() self.cluster_prefix = 'boto-redshift-cluster-%s' self.node_type = 'dw.hs1.xlarge' - self.master_username = 'mrtest' - self.master_password = 'P4ssword' + self.main_username = 'mrtest' + self.main_password = 'P4ssword' self.db_name = 'simon' # Redshift was taking ~20 minutes to bring clusters up in testing. 
self.wait_time = 60 * 20 @@ -50,7 +50,7 @@ def create_cluster(self): cluster_id = self.cluster_id() self.api.create_cluster( cluster_id, self.node_type, - self.master_username, self.master_password, + self.main_username, self.main_password, db_name=self.db_name, number_of_nodes=3 ) @@ -71,7 +71,7 @@ def test_create_delete_cluster(self): cluster_id = self.cluster_id() self.api.create_cluster( cluster_id, self.node_type, - self.master_username, self.master_password, + self.main_username, self.main_password, db_name=self.db_name, number_of_nodes=3 ) diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_connection.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_connection.py index ca13d1c..68637ca 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_connection.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_connection.py @@ -193,7 +193,7 @@ def default_body(self): 2014-01-24T02:19:46Z - Master instance group + Main instance group 1 0 MASTER @@ -253,7 +253,7 @@ def test_list_instance_groups(self): self.assertEqual(response.instancegroups[0].instancetype, "m1.large") self.assertEqual(response.instancegroups[0].market, "ON_DEMAND") self.assertEqual( - response.instancegroups[0].name, "Master instance group") + response.instancegroups[0].name, "Main instance group") self.assertEqual( response.instancegroups[0].requestedinstancecount, '1') self.assertEqual(response.instancegroups[0].runninginstancecount, '0') @@ -587,7 +587,7 @@ def default_body(self): false - ec2-184-0-0-1.us-west-1.compute.amazonaws.com + ec2-184-0-0-1.us-west-1.compute.amazonaws.com 10 my-service-role @@ -622,7 +622,7 @@ def test_describe_cluster(self): self.assertEqual(response.applications[0].name, 'hadoop') self.assertEqual(response.applications[0].version, '1.0.3') self.assertEqual( - response.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') + 
response.mainpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') self.assertEqual(response.normalizedinstancehours, '10') self.assertEqual(response.servicerole, 'my-service-role') @@ -862,7 +862,7 @@ def default_body(self): us-west-1c - m1.large + m1.large my_key true @@ -879,7 +879,7 @@ def default_body(self): ON_DEMAND ig-aaaaaa MASTER - Master instance group + Main instance group 2014-01-24T01:21:21Z @@ -897,11 +897,11 @@ def default_body(self): Core instance group - m1.large - i-aaaaaa + m1.large + i-aaaaaa 1.0.3 12 - ec2-184-0-0-1.us-west-1.compute.amazonaws.com + ec2-184-0-0-1.us-west-1.compute.amazonaws.com 3 false @@ -930,15 +930,15 @@ def test_describe_jobflows_response(self): self.assertEqual(jf.name, 'test analytics') self.assertEqual(jf.jobflowid, 'j-aaaaaa') self.assertEqual(jf.ec2keyname, 'my_key') - self.assertEqual(jf.masterinstancetype, 'm1.large') + self.assertEqual(jf.maininstancetype, 'm1.large') self.assertEqual(jf.availabilityzone, 'us-west-1c') self.assertEqual(jf.keepjobflowalivewhennosteps, 'true') - self.assertEqual(jf.slaveinstancetype, 'm1.large') - self.assertEqual(jf.masterinstanceid, 'i-aaaaaa') + self.assertEqual(jf.subordinateinstancetype, 'm1.large') + self.assertEqual(jf.maininstanceid, 'i-aaaaaa') self.assertEqual(jf.hadoopversion, '1.0.3') self.assertEqual(jf.normalizedinstancehours, '12') self.assertEqual( - jf.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') + jf.mainpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') self.assertEqual(jf.instancecount, '3') self.assertEqual(jf.terminationprotected, 'false') @@ -961,7 +961,7 @@ def test_describe_jobflows_response(self): self.assertEqual(ig.market, 'ON_DEMAND') self.assertEqual(ig.instancegroupid, 'ig-aaaaaa') self.assertEqual(ig.instancerole, 'MASTER') - self.assertEqual(ig.name, 'Master instance group') + self.assertEqual(ig.name, 'Main instance group') def test_describe_jobflows_no_args(self): self.set_http_response(200) @@ -1033,8 
+1033,8 @@ def test_run_jobflow_service_role(self): 'Name': 'EmrCluster'}, ignore_params_values=['ActionOnFailure', 'Instances.InstanceCount', 'Instances.KeepJobFlowAliveWhenNoSteps', - 'Instances.MasterInstanceType', - 'Instances.SlaveInstanceType']) + 'Instances.MainInstanceType', + 'Instances.SubordinateInstanceType']) def test_run_jobflow_enable_debugging(self): self.region = 'ap-northeast-2' diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_emr_responses.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_emr_responses.py index dda6b92..656bde7 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_emr_responses.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_emr_responses.py @@ -85,8 +85,8 @@ us-east-1a - m1.small - m1.small + m1.small + m1.small myec2keyname 4 true @@ -281,8 +281,8 @@ j-3H3Q13JPFLU22 - m1.large - i-64c21609 + m1.large + i-64c21609 us-east-1b @@ -300,7 +300,7 @@ Job flow terminated MASTER ig-EVMHOZJ2SCO8 - master + main 2010-10-21T01:00:25Z @@ -315,13 +315,13 @@ Job flow terminated CORE ig-YZHDYVITVHKB - slave + subordinate 40 0.20 - m1.large - ec2-184-72-153-139.compute-1.amazonaws.com + m1.large + ec2-184-72-153-139.compute-1.amazonaws.com myubersecurekey 10 false @@ -361,8 +361,8 @@ def test_JobFlows_example(self): loguri='mybucket/subdir/', name='MyJobFlowName', availabilityzone='us-east-1a', - slaveinstancetype='m1.small', - masterinstancetype='m1.small', + subordinateinstancetype='m1.small', + maininstancetype='m1.small', ec2keyname='myec2keyname', keepjobflowalivewhennosteps='true') @@ -379,8 +379,8 @@ def test_JobFlows_completed(self): loguri='s3n://example.emrtest.scripts/jobflow_logs/', name='RealJobFlowName', availabilityzone='us-east-1b', - slaveinstancetype='m1.large', - masterinstancetype='m1.large', + subordinateinstancetype='m1.large', + maininstancetype='m1.large', ec2keyname='myubersecurekey', 
keepjobflowalivewhennosteps='false') self.assertEquals(6, len(jobflow.steps)) diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_instance_group_args.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_instance_group_args.py index cc5c747..eb8b022 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_instance_group_args.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_instance_group_args.py @@ -19,7 +19,7 @@ def test_bidprice_missing_spot(self): """ with self.assertRaisesRegexp(ValueError, 'bidprice must be specified'): InstanceGroup(1, 'MASTER', 'm1.small', - 'SPOT', 'master') + 'SPOT', 'main') def test_bidprice_missing_ondemand(self): """ @@ -27,14 +27,14 @@ def test_bidprice_missing_ondemand(self): ON_DEMAND. """ instance_group = InstanceGroup(1, 'MASTER', 'm1.small', - 'ON_DEMAND', 'master') + 'ON_DEMAND', 'main') def test_bidprice_Decimal(self): """ Test InstanceGroup init works with bidprice type = Decimal. """ instance_group = InstanceGroup(1, 'MASTER', 'm1.small', - 'SPOT', 'master', bidprice=Decimal(1.10)) + 'SPOT', 'main', bidprice=Decimal(1.10)) self.assertEquals('1.10', instance_group.bidprice[:4]) def test_bidprice_float(self): @@ -42,7 +42,7 @@ def test_bidprice_float(self): Test InstanceGroup init works with bidprice type = float. """ instance_group = InstanceGroup(1, 'MASTER', 'm1.small', - 'SPOT', 'master', bidprice=1.1) + 'SPOT', 'main', bidprice=1.1) self.assertEquals('1.1', instance_group.bidprice) def test_bidprice_string(self): @@ -50,7 +50,7 @@ def test_bidprice_string(self): Test InstanceGroup init works with bidprice type = string. 
""" instance_group = InstanceGroup(1, 'MASTER', 'm1.small', - 'SPOT', 'master', bidprice='1.1') + 'SPOT', 'main', bidprice='1.1') self.assertEquals('1.1', instance_group.bidprice) if __name__ == "__main__": diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_connection.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_connection.py index fbc65b0..8f4bc52 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_connection.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_connection.py @@ -88,7 +88,7 @@ def default_body(self): 2012-10-03T22:01:51.047Z 200 db.m1.large - awsuser + awsuser @@ -150,7 +150,7 @@ def test_get_all_db_instances(self): db.endpoint, (u'mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com', 3306)) self.assertEqual(db.instance_class, 'db.m1.large') - self.assertEqual(db.master_username, 'awsuser') + self.assertEqual(db.main_username, 'awsuser') self.assertEqual(db.availability_zone, 'us-west-2b') self.assertEqual(db.backup_retention_period, 1) self.assertEqual(db.preferred_backup_window, '10:30-11:00') @@ -198,7 +198,7 @@ def default_body(self): mysql - **** + **** 0 false @@ -252,7 +252,7 @@ def default_body(self): sat:07:30-sat:08:00 10 db.m1.large - master + main @@ -267,7 +267,7 @@ def test_create_db_instance_param_group_name(self): 'SimCoProd01', 10, 'db.m1.large', - 'master', + 'main', 'Password01', param_group='default.mysql5.1', db_subnet_group_name='dbSubnetgroup01', @@ -283,8 +283,8 @@ def test_create_db_instance_param_group_name(self): 'DBParameterGroupName': 'default.mysql5.1', 'DBSubnetGroupName': 'dbSubnetgroup01', 'Engine': 'MySQL5.1', - 'MasterUsername': 'master', - 'MasterUserPassword': 'Password01', + 'MainUsername': 'main', + 'MainUserPassword': 'Password01', 'Port': 3306 }, ignore_params_values=['Version']) @@ -293,10 +293,10 @@ def test_create_db_instance_param_group_name(self): self.assertEqual(db.status, 
'creating') self.assertEqual(db.allocated_storage, 10) self.assertEqual(db.instance_class, 'db.m1.large') - self.assertEqual(db.master_username, 'master') + self.assertEqual(db.main_username, 'main') self.assertEqual(db.multi_az, False) self.assertEqual(db.pending_modified_values, - {'MasterUserPassword': '****'}) + {'MainUserPassword': '****'}) self.assertEqual(db.parameter_group.name, 'default.mysql5.1') @@ -312,7 +312,7 @@ def test_create_db_instance_param_group_instance(self): 'SimCoProd01', 10, 'db.m1.large', - 'master', + 'main', 'Password01', param_group=param_group, db_subnet_group_name='dbSubnetgroup01') @@ -326,8 +326,8 @@ def test_create_db_instance_param_group_instance(self): 'DBParameterGroupName': 'default.mysql5.1', 'DBSubnetGroupName': 'dbSubnetgroup01', 'Engine': 'MySQL5.1', - 'MasterUsername': 'master', - 'MasterUserPassword': 'Password01', + 'MainUsername': 'main', + 'MainUserPassword': 'Password01', 'Port': 3306, }, ignore_params_values=['Version']) @@ -336,10 +336,10 @@ def test_create_db_instance_param_group_instance(self): self.assertEqual(db.status, 'creating') self.assertEqual(db.allocated_storage, 10) self.assertEqual(db.instance_class, 'db.m1.large') - self.assertEqual(db.master_username, 'master') + self.assertEqual(db.main_username, 'main') self.assertEqual(db.multi_az, False) self.assertEqual(db.pending_modified_values, - {'MasterUserPassword': '****'}) + {'MainUserPassword': '****'}) self.assertEqual(db.parameter_group.name, 'default.mysql5.1') self.assertEqual(db.parameter_group.description, None) @@ -383,7 +383,7 @@ def default_body(self): sat:07:30-sat:08:00 10 db.m1.large - master + main @@ -411,7 +411,7 @@ def test_restore_dbinstance_from_point_in_time(self): self.assertEqual(db.status, 'creating') self.assertEqual(db.allocated_storage, 10) self.assertEqual(db.instance_class, 'db.m1.large') - self.assertEqual(db.master_username, 'master') + self.assertEqual(db.main_username, 'main') self.assertEqual(db.multi_az, False) 
self.assertEqual(db.parameter_group.name, @@ -445,7 +445,7 @@ def test_create_db_instance_vpc_sg_str(self): 'SimCoProd01', 10, 'db.m1.large', - 'master', + 'main', 'Password01', param_group='default.mysql5.1', db_subnet_group_name='dbSubnetgroup01', @@ -460,8 +460,8 @@ def test_create_db_instance_vpc_sg_str(self): 'DBParameterGroupName': 'default.mysql5.1', 'DBSubnetGroupName': 'dbSubnetgroup01', 'Engine': 'MySQL5.1', - 'MasterUsername': 'master', - 'MasterUserPassword': 'Password01', + 'MainUsername': 'main', + 'MainUserPassword': 'Password01', 'Port': 3306, 'VpcSecurityGroupIds.member.1': 'sg-1', 'VpcSecurityGroupIds.member.2': 'sg-2' @@ -481,7 +481,7 @@ def test_create_db_instance_vpc_sg_obj(self): 'SimCoProd01', 10, 'db.m1.large', - 'master', + 'main', 'Password01', param_group='default.mysql5.1', db_subnet_group_name='dbSubnetgroup01', @@ -496,8 +496,8 @@ def test_create_db_instance_vpc_sg_obj(self): 'DBParameterGroupName': 'default.mysql5.1', 'DBSubnetGroupName': 'dbSubnetgroup01', 'Engine': 'MySQL5.1', - 'MasterUsername': 'master', - 'MasterUserPassword': 'Password01', + 'MainUsername': 'main', + 'MainUserPassword': 'Password01', 'Port': 3306, 'VpcSecurityGroupIds.member.1': 'sg-1', 'VpcSecurityGroupIds.member.2': 'sg-2' @@ -674,9 +674,9 @@ class TestRDSLogFileDownload(AWSMockServiceTestCase): 2014-01-27 09:35:15.44 spid74 I/O is frozen on database rdsadmin. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. -2014-01-27 09:35:15.44 spid73 I/O is frozen on database master. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. +2014-01-27 09:35:15.44 spid73 I/O is frozen on database main. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. -2014-01-27 09:35:25.57 spid73 I/O was resumed on database master. No user action is required. +2014-01-27 09:35:25.57 spid73 I/O was resumed on database main. No user action is required. 
2014-01-27 09:35:25.57 spid74 I/O was resumed on database rdsadmin. No user action is required. diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_snapshot.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_snapshot.py index c3c9d8a..aded3cc 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_snapshot.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/rds/test_snapshot.py @@ -27,7 +27,7 @@ def default_body(self): 5.1.50 mydbsnapshot manual - master + main myoptiongroupname 1000 100 @@ -47,7 +47,7 @@ def default_body(self): 5.1.49 mysnapshot1 manual - sa + sa myoptiongroupname 1000 @@ -64,7 +64,7 @@ def default_body(self): 5.1.47 rds:simcoprod01-2012-04-02-00-01 automated - master + main myoptiongroupname 1000 @@ -118,7 +118,7 @@ def default_body(self): 5.1.50 mydbsnapshot manual - master + main @@ -160,7 +160,7 @@ def default_body(self): 5.1.50 mycopieddbsnapshot manual - master + main @@ -202,7 +202,7 @@ def default_body(self): 5.1.47 mysnapshot2 manual - master + main @@ -257,7 +257,7 @@ def default_body(self): sat:07:30-sat:08:00 10 db.m1.large - master + main diff --git a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/route53/test_connection.py b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/route53/test_connection.py index 73d7724..accfa4d 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/route53/test_connection.py +++ b/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/route53/test_connection.py @@ -509,7 +509,7 @@ def default_body(self): 1800 - ns-1929.awsdns-93.net. hostmaster.awsdns.net. 1 10800 3600 604800 1800 + ns-1929.awsdns-93.net. hostmain.awsdns.net. 
1 10800 3600 604800 1800 diff --git a/google-cloud-sdk/platform/gsutil/third_party/crcmod/docs/source/conf.py b/google-cloud-sdk/platform/gsutil/third_party/crcmod/docs/source/conf.py index 87abe83..b8c0f1a 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/crcmod/docs/source/conf.py +++ b/google-cloud-sdk/platform/gsutil/third_party/crcmod/docs/source/conf.py @@ -36,8 +36,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'crcmod' diff --git a/google-cloud-sdk/platform/gsutil/third_party/funcsigs/docs/conf.py b/google-cloud-sdk/platform/gsutil/third_party/funcsigs/docs/conf.py index c6e4194..3b1747c 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/funcsigs/docs/conf.py +++ b/google-cloud-sdk/platform/gsutil/third_party/funcsigs/docs/conf.py @@ -37,8 +37,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = 'funcsigs' diff --git a/google-cloud-sdk/platform/gsutil/third_party/mock/docs/conf.py b/google-cloud-sdk/platform/gsutil/third_party/mock/docs/conf.py index d32357d..b417324 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/mock/docs/conf.py +++ b/google-cloud-sdk/platform/gsutil/third_party/mock/docs/conf.py @@ -62,8 +62,8 @@ def __init__(self): # The suffix of source filenames. source_suffix = '.txt' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General substitutions. 
project = u'Mock' diff --git a/google-cloud-sdk/platform/gsutil/third_party/oauth2client/docs/conf.py b/google-cloud-sdk/platform/gsutil/third_party/oauth2client/docs/conf.py index f84911c..37486d9 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/oauth2client/docs/conf.py +++ b/google-cloud-sdk/platform/gsutil/third_party/oauth2client/docs/conf.py @@ -48,7 +48,7 @@ def __getattr__(cls, name): ] templates_path = ['_templates'] source_suffix = '.rst' -master_doc = 'index' +main_doc = 'index' # General information about the project. project = u'oauth2client' diff --git a/google-cloud-sdk/platform/gsutil/third_party/rsa/doc/conf.py b/google-cloud-sdk/platform/gsutil/third_party/rsa/doc/conf.py index f2e66a7..a0d0171 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/rsa/doc/conf.py +++ b/google-cloud-sdk/platform/gsutil/third_party/rsa/doc/conf.py @@ -40,8 +40,8 @@ # The encoding of source files. source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'Python-RSA' diff --git a/google-cloud-sdk/platform/gsutil/third_party/six/documentation/conf.py b/google-cloud-sdk/platform/gsutil/third_party/six/documentation/conf.py index 0215bdd..a7cd039 100755 --- a/google-cloud-sdk/platform/gsutil/third_party/six/documentation/conf.py +++ b/google-cloud-sdk/platform/gsutil/third_party/six/documentation/conf.py @@ -28,8 +28,8 @@ # The encoding of source files. #source_encoding = "utf-8-sig" -# The master toctree document. -master_doc = "index" +# The main toctree document. +main_doc = "index" # General information about the project. project = u"six"