Skip to content

Commit

Permalink
Merge branch 'main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
iranzo authored Jun 7, 2022
2 parents a591442 + 706801c commit 4e98ea4
Show file tree
Hide file tree
Showing 27 changed files with 241 additions and 135 deletions.
14 changes: 9 additions & 5 deletions .github/workflows/broken-link-check.yml
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
on:
schedule:
- cron: "0 0 * * *" # daily
repository_dispatch: # run manually
- cron: "0 0 * * *"
repository_dispatch:
types: [check-link]
# push:
# ...
workflow_dispatch:

name: Broken Link Check
jobs:
Expand All @@ -13,4 +12,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Broken Link Check
uses: technote-space/[email protected]
uses: technote-space/[email protected]
with:
EXCLUDED_KEYWORDS: |
docs.github.com
camo.githubusercontent.com
github.com/apps/dependabot
13 changes: 2 additions & 11 deletions .github/workflows/testing-pr.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ on:
pull_request_review:
types: [submitted, edited]
pull_request:
types: [labeled]
types: [labeled, synchronize]
#pull_request_target:
# types: [labeled]
workflow_dispatch:
Expand Down Expand Up @@ -93,16 +93,7 @@ jobs:
- name: remove all old deployments and vms
run: |
export KUBECONFIG=/root/.kcli/clusters/test-ci/auth/kubeconfig
oc delete --ignore-not-found=true managedcluster edgecluster0-cluster || true
oc delete --ignore-not-found=true ns edgecluster0-cluster || true
kcli delete vm edgecluster0-cluster-m0 -y || true
kcli delete vm edgecluster0-cluster-m1 -y || true
kcli delete vm edgecluster0-cluster-m2 -y || true
kcli delete vm edgecluster0-cluster-w0 -y || true
list=$(tkn pr ls -n edgecluster-deployer |grep -i running | cut -d' ' -f1)
for i in $list; do
tkn pr cancel $i -n edgecluster-deployer || true
done
EDGE_NAME=edgecluster0-cluster make clean-ci || true
- name: Launching pipeline execution
run: |
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/ui.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ on:
pull_request:
paths:
- 'ui/**'
types: [labeled]
types: [labeled,synchronize]
pull_request_review:
types: [submitted, edited]

Expand Down
12 changes: 12 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -179,3 +179,15 @@ clean:
oc delete ns $(EDGE_NAME); \
oc rollout restart -n openshift-machine-api deployment/metal3; \
kcli delete vm -y $(EDGE_NAME)-m0 $(EDGE_NAME)-m1 $(EDGE_NAME)-m2 $(EDGE_NAME)-w0

# clean-ci: tear down every artifact a CI edge-cluster deployment leaves behind:
# running Tekton pipeline runs, the managed cluster, BMH/secret finalizers, the
# namespace, and the kcli VMs. Expects EDGE_NAME to be set by the caller
# (e.g. EDGE_NAME=edgecluster0-cluster make clean-ci).
# From: https://github.com/stolostron/deploy/blob/master/hack/cleanup-managed-cluster.sh
.PHONY: clean-ci
clean-ci:
	# Guard against an empty EDGE_NAME wiping unrelated resources.
	$(if $(strip $(EDGE_NAME)),,$(error EDGE_NAME is empty))
	# Cancel any still-running pipeline runs first so they stop touching the cluster.
	# NOTE: $${list} (shell variable) — a bare ${list} would be expanded by Make
	# to an undefined (empty) Make variable and the loop would never run.
	list=$$(tkn pr ls -n edgecluster-deployer | grep -i running | cut -d' ' -f1); \
	for i in $${list}; do tkn pr cancel $${i} -n edgecluster-deployer; done; \
	oc delete --ignore-not-found=true managedcluster $(EDGE_NAME); \
	list=$$(oc get bmh -n $(EDGE_NAME) --no-headers | awk '{print $$1}'); \
	for i in $${list}; do oc patch -n $(EDGE_NAME) bmh $${i} --type json -p '[ { "op": "remove", "path": "/metadata/finalizers" } ]'; done; \
	list=$$(oc get secret -n $(EDGE_NAME) --no-headers | grep bmc | awk '{print $$1}'); \
	for i in $${list}; do oc patch -n $(EDGE_NAME) secret $${i} --type json -p '[ { "op": "remove", "path": "/metadata/finalizers" } ]'; done; \
	oc delete --ignore-not-found=true ns $(EDGE_NAME); \
	kcli delete vm -y $(EDGE_NAME)-m0 $(EDGE_NAME)-m1 $(EDGE_NAME)-m2 $(EDGE_NAME)-w0
13 changes: 10 additions & 3 deletions documentation/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,19 @@ Documentation in this folder will be created by using asciidoc via the `utils/bu

For easing development and preview, a gulp configuration has been created in the root of the repository.

Remember that this still requires your environment to be able to execute `tools/build.sh` so dependencies like `asciidoctor` should be made available first.
Remember that this still requires your environment to be able to execute `utils/build.sh`.

Dependencies should be made available first. If you're using Fedora, the dependencies can be resolved with:

```bash
sudo dnf install yarnpkg rubygem-asciidoctor rubygem-asciidoctor-pdf
sudo yarn global add gulp-cli
```

**Important**: Check your distribution/OS as this naming may change.

For starting `gulp` environment:

- Install `NodeJS` and `Yarn` using your system package manager
- Install `gulp` via: `yarn global add gulp-cli`
- In the root of the repository, run `yarn install`
- Use Live Reload `gulp`

Expand Down
12 changes: 6 additions & 6 deletions documentation/ztp-for-factories/main.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@

include::ztp-for-factory-overview-ocp-docs.adoc[leveloffset=+1]

include::ztp-install-factory-hubspoke-architecture.adoc[leveloffset=+1]
include::ztp-install-factory-hubedge-architecture.adoc[leveloffset=+1]

include::ztp-create-factory-hub-cluster.adoc[leveloffset=+1]

Expand All @@ -23,7 +23,7 @@ include::ztp-install-factory-pipeline-overview.adoc[leveloffset=+1]

include::ztp-hub-factory-pipeline.adoc[leveloffset=+2]

include::ztp-spoke-factory-pipeline.adoc[leveloffset=+2]
include::ztp-edge-factory-pipeline.adoc[leveloffset=+2]

include::ztp-pre-hub-install-factory-pipeline-checks.adoc[leveloffset=+1]

Expand All @@ -35,17 +35,17 @@ include::ztp-check-install-hub-factory-pipeline.adoc[leveloffset=+2]

include::ztp-post-install-hub-factory-pipeline-checks.adoc[leveloffset=+2]

include::ztp-install-factory-spoke-pipeline.adoc[leveloffset=+1]
include::ztp-install-factory-edge-pipeline.adoc[leveloffset=+1]

include::ztp-check-install-spoke-factory-pipeline.adoc[leveloffset=+2]
include::ztp-check-install-edge-factory-pipeline.adoc[leveloffset=+2]

include::ztp-post-install-spoke-factory-pipeline-checks.adoc[leveloffset=+2]
include::ztp-post-install-edge-factory-pipeline-checks.adoc[leveloffset=+2]

include::ztp-troubleshooting-factory-pipelines.adoc[leveloffset=+1]

include::ztp-common-expected-errors.adoc[leveloffset=+1]

include::ztp-configuring-spoke-at-remote-site.adoc[leveloffset=+1]
include::ztp-configuring-edge-at-remote-site.adoc[leveloffset=+1]

include::ztpfw-pipelines-flags-arguments.adoc[leveloffset=+1]

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,18 +10,18 @@ You can watch the progress of the pipelines by using the {product-title} web con

.Procedure

. Examine the logs to watch the progress of the `deploy-ztp-spokes`.
. Examine the logs to watch the progress of the `deploy-ztp-edgeclusters`.
+
[source,terminal]
----
$ tkn pipeline logs deploy-ztp-spoke-run-2rklt -f -n spoke-deployer
$ tkn pipeline logs deploy-ztp-edgecluster-run-2rklt -f -n edgecluster-deployer
----
. Log in to the {product-title} web console.
. Navigate to *Pipelines* -> *Pipelines* and select the Project *spoke-deployer*.
. Navigate to *Pipelines* -> *Pipelines* and select the Project *edgecluster-deployer*.
+
[NOTE]
====
The `spoke-deployer` pipeline stores all the artefacts for {product-title} Pipelines.
The `edgecluster-deployer` pipeline stores all the artifacts for {product-title} Pipelines.
====
. Select **PipelineRuns** to drill down into the details of the pipeline runs.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@ You can watch the progress of the pipeline by using the {product-title} web cons
+
[source,terminal]
----
$ tkn pipeline logs deploy-ztp-hub-run-2h44k -f -n spoke-deployer
$ tkn pipeline logs deploy-ztp-hub-run-2h44k -f -n edgecluster-deployer
----
. Log in to the {product-title} web console.

. Navigate to *Pipelines* -> *Pipelines* and select the Project *spoke-deployer*.
. Navigate to *Pipelines* -> *Pipelines* and select the Project *edgecluster-deployer*.
+
[NOTE]
====
The `spoke-deployer` pipeline stores all the artifacts for {product-title} Pipelines.
The `edgecluster-deployer` pipeline stores all the artifacts for {product-title} Pipelines.
====

. Select **PipelineRuns** to drill down into detail on the pipeline runs.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ Another expected error occurs during the running of **deploy registry** stage of
[source,terminal]
----
[deploy-disconnected-registry : deploy-disconnected-registry] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
[deploy-disconnected-registry : deploy-disconnected-registry] Creating /workspace/ztp/build/spokes.yaml from SPOKES_CONFIG
[deploy-disconnected-registry : deploy-disconnected-registry] Creating /workspace/ztp/build/edgeclusters.yaml from SPOKES_CONFIG
[deploy-disconnected-registry : deploy-disconnected-registry] Waiting for deployment of ztpfw-registry in namespace ztpfw-registry with a timeout 1000 seconds
[deploy-disconnected-registry : deploy-disconnected-registry] Expected generation for deployment ztpfw-registry: 1
[deploy-disconnected-registry : deploy-disconnected-registry] Observed expected generation: 1
Expand All @@ -36,6 +36,6 @@ Another expected error occurs during the running of **deploy registry** stage of
[deploy-disconnected-registry : deploy-disconnected-registry] Mode: hub
[deploy-disconnected-registry : deploy-disconnected-registry] >> Waiting for the MCO to grab the new MachineConfig for the certificate...
failed to get logs for task deploy-disconnected-registry : error in getting logs for step mirror-ocp: error getting logs for pod deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz(step-mirror-ocp) : Get "https://192.168.150.190:10250/containerLogs/spoke-deployer/deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz/step-mirror-ocp?follow=true": dial tcp 192.168.150.190:10250: connect: connection refused
failed to get logs for task deploy-disconnected-registry : error in getting logs for step mirror-olm: error getting logs for pod deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz(step-mirror-olm) : Get "https://192.168.150.190:10250/containerLogs/spoke-deployer/deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz/step-mirror-olm?follow=true": dial tcp 192.168.150.190:10250: connect: connection refused
failed to get logs for task deploy-disconnected-registry : error in getting logs for step mirror-ocp: error getting logs for pod deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz(step-mirror-ocp) : Get "https://192.168.150.190:10250/containerLogs/edgecluster-deployer/deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz/step-mirror-ocp?follow=true": dial tcp 192.168.150.190:10250: connect: connection refused
failed to get logs for task deploy-disconnected-registry : error in getting logs for step mirror-olm: error getting logs for pod deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz(step-mirror-olm) : Get "https://192.168.150.190:10250/containerLogs/edgecluster-deployer/deploy-ztp-hub-run-wt5kr-deploy-disconnected-registry-kxm-585tz/step-mirror-olm?follow=true": dial tcp 192.168.150.190:10250: connect: connection refused
----
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,19 @@ include::modules/common-attributes.adoc[]
[discrete]
=== Base prerequisites

* Deploy the {product-title} cluster with three control plane nodes following the guidance in the section "Deploying installer-provisioned clusters on bare metal". Alternatively you can use the technology preview Assisted Installer from link:https://cloud.redhat.com/[cloud.redhat.com] to create the cluster. To install single-node OpenShift follow the guidance in "Installing on a single node" in the {product-title} documentation.
** All cluster Operators are available.
** Cluster is reachable using a `KUBECONFIG` file.
** The API, API-INT and ingress should be deployed on edge cluster on the DHCP external network.
* Deploy the {product-title} cluster with three control plane nodes following the guidance in the section link:https://docs.openshift.com/container-platform/4.10/installing/installing_bare_metal_ipi/ipi-install-prerequisites.html[Deploying installer-provisioned clusters on bare metal].

** Alternatively you can use the technology preview Assisted Installer from link:https://cloud.redhat.com/[cloud.redhat.com] to create the cluster.

* To install single-node OpenShift follow the guidance in link:https://docs.openshift.com/container-platform/4.10/installing/installing_sno/install-sno-installing-sno.html[Installing on a single node] in the {product-title} documentation.

* All cluster Operators are available.

* Cluster is reachable using a `KUBECONFIG` file.

* The DNS names for `api.<hub-clustername>.<baseDomain>`, `api-int.<hub-clustername>.<baseDomain>` and `*.apps.<hub-clustername>.<baseDomain>` should be deployed on the edge cluster on the DHCP external network.

* link:https://metal3.io/[Metal³] has to be available in the hub cluster.

[discrete]
=== Storage prerequisites
Expand Down Expand Up @@ -47,13 +56,18 @@ Specific requirements are:
** ingress (`*.apps.<hub-clustername>.<baseDomain>`).

* Edge
** `api.<spoke-clustername>.<baseDomain>` and `api-int.<spoke-clustername>.<baseDomain>` entries to the same IP address.
** ingress (`*.apps.<spoke-clustername>.<baseDomain>`).
** `api.<edge-cluster-name>.<baseDomain>` and `api-int.<edge-cluster-name>.<baseDomain>` entries to the same IP address.
** ingress (`*.apps.<edge-cluster-name>.<baseDomain>`).

[NOTE]
====
When deploying a single-node OpenShift cluster, the `api.<edge-cluster-name>.<baseDomain>` and `*.apps.<edge-cluster-name>.<baseDomain>` must be configured with different IP addresses.
====

* External DHCP with enough free IPs on the factory network to provide access to the edge cluster by using the external network interface.

* Every edge cluster needs at least 6 IPs from this external network (without the broadcast and network IP).
** 1 per node.
** 1 for API.
** 1 for API-INT.
** 1 for the Ingress entry (`*.apps.<spoke-clustername>.<baseDomain>`).
** 1 for the Ingress entry (`*.apps.<edge-cluster-name>.<baseDomain>`).
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ This step deploys the NMState and the MetalLB Operators. NMState creates one pro

Deploy OpenShift Data Foundation::

This step deploys the Local Storage Operator and also OpenShift Data Foundation (ODF). ODF and the Local Storage Operator uses disks defined in the `storage_disk` section of the `spokes.yaml` configuration file to create persistent volumes. ODF generates the storage classes and dynamically provisions the persistent volumes. This provides the storage necessary to host the disconnected registry images (Quay).
This step deploys the Local Storage Operator and also OpenShift Data Foundation (ODF). ODF and the Local Storage Operator uses disks defined in the `storage_disk` section of the `edgeclusters.yaml` configuration file to create persistent volumes. ODF generates the storage classes and dynamically provisions the persistent volumes. This provides the storage necessary to host the disconnected registry images (Quay).

Deploy Quay::

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ toc::[]

include::ztp-for-factory-overview-ocp-docs.adoc[leveloffset=+1]

include::ztp-install-factory-hubspoke-architecture.adoc[leveloffset=+1]
include::ztp-install-factory-hubedgecluster-architecture.adoc[leveloffset=+1]

include::ztp-create-factory-hub-cluster.adoc[leveloffset=+1]

Expand Down Expand Up @@ -44,7 +44,7 @@ include::ztp-hub-factory-pipeline.adoc[leveloffset=+2]
* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.4/html/install/installing[Install and uninstall Red Hat Advanced Cluster Management]
* xref:https://docs.openshift.com/container-platform/latest/scalability_and_performance/ztp-deploying-disconnected.adoc#enabling-assisted-installer-service-on-bare-metal_ztp-deploying-disconnected[Enabling assisted installer service on bare metal]
include::ztp-spoke-factory-pipeline.adoc[leveloffset=+2]
include::ztp-edgecluster-factory-pipeline.adoc[leveloffset=+2]

include::ztp-pre-hub-install-factory-pipeline-checks.adoc[leveloffset=+1]

Expand All @@ -66,17 +66,17 @@ include::ztp-check-install-hub-factory-pipeline.adoc[leveloffset=+2]

include::ztp-post-install-hub-factory-pipeline-checks.adoc[leveloffset=+2]

include::ztp-install-factory-spoke-pipeline.adoc[leveloffset=+1]
include::ztp-install-factory-edgecluster-pipeline.adoc[leveloffset=+1]

include::ztp-check-install-spoke-factory-pipeline.adoc[leveloffset=+2]
include::ztp-check-install-edgecluster-factory-pipeline.adoc[leveloffset=+2]

include::ztp-post-install-spoke-factory-pipeline-checks.adoc[leveloffset=+2]
include::ztp-post-install-edgecluster-factory-pipeline-checks.adoc[leveloffset=+2]

include::ztp-troubleshooting-factory-pipelines.adoc[leveloffset=+1]

include::ztp-common-expected-errors.adoc[leveloffset=+1]

include::ztp-configuring-spoke-at-remote-site.adoc[leveloffset=+1]
include::ztp-configuring-edgecluster-at-remote-site.adoc[leveloffset=+1]

include::ztpfw-pipelines-flags-arguments.adoc[leveloffset=+1]

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ The hub cluster can be a single-node OpenShift cluster deploying multiple single
The hub cluster is also known as the factory cluster.
====

The following are the possible combinations of hub and spoke cluster topologies:
The following are the possible combinations of hub and edge cluster topologies:

.Cluster topologies
[options="header"]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ Installer-provisioned installation of {product-title} requires:
. `api.<edgecluster-domain>.<net-domain>` and `api-int.<edgecluster-domain>.<net-domain>` entries to the same IP address
. ingress (*.apps.<edgecluster-domain>.<net-domain>)
- External DHCP with some free IPs on the factory to provide access to the Edge-cluster using the external network interface
- Every Edge-cluster will need at least ~6 IPs from this External Network (without the broadcast and network ip)
- Every Edge-cluster will need at least ~6 IPs from this External Network (without the broadcast and network IP)
. 1 per node
. 1 API and same for API-INT
. 1 for the Ingress entry (*.apps.<edgecluster-domain>.<net-domain>)
Expand Down
Loading

0 comments on commit 4e98ea4

Please sign in to comment.