From ff9ec96108e498256593a917bdc5230fb0b09f09 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 31 Jul 2018 15:48:42 +0200 Subject: [PATCH] documentation mash commit --- doc-notes.md | 25 ++-- doc/source/advanced.md | 2 +- doc/source/amazon/efs_storage.rst | 2 +- doc/source/create-k8s-cluster.rst | 14 ++- doc/source/extending-jupyterhub.rst | 40 ++++--- doc/source/getting-started.rst | 16 +-- doc/source/google/step-zero-gcp.rst | 23 ++-- doc/source/index.rst | 12 +- doc/source/microsoft/step-zero-azure.rst | 4 +- doc/source/optimization.md | 2 +- doc/source/reference.txt | 15 ++- doc/source/security.md | 2 +- doc/source/setup-helm.rst | 26 ++--- doc/source/setup-jupyterhub.rst | 82 +++++-------- doc/source/tips.rst | 4 +- doc/source/tools.rst | 20 ++-- doc/source/troubleshooting.rst | 4 +- doc/source/turn-off.rst | 76 ++++++------ doc/source/upgrading.md | 2 +- doc/source/user-environment.rst | 142 +++++++++++------------ doc/source/user-management.md | 2 +- doc/source/user-resources.rst | 4 +- doc/source/user-storage.md | 3 +- jupyterhub/schema.yaml | 10 +- jupyterhub/values.yaml | 3 +- 25 files changed, 261 insertions(+), 274 deletions(-) diff --git a/doc-notes.md b/doc-notes.md index a7c1fce354..b66ba10591 100644 --- a/doc-notes.md +++ b/doc-notes.md @@ -1,37 +1,36 @@ index creating your kubernetes cluster: - creating your cluster - zero-gke + creating your cluster OK + zero-gke OK creating your jupyterhub: - getting started - setting up helm - setting up jupyterhub - turning off jupyterhub and computational resources + Getting Started OK + Setting up Helm OK + Setting up JupyterHub OK + Tearing down everything OK -customization guide - extending your jh setup - applying config changes +Customization guide + Customizing your deployment (extending) OK - customizing user environment + Customizing the User Environment use an existing docker image build a custom docker image with repo2docker user jupyterlab by default set env variables pre-populating users home 
dir - user resources + Customizing User Resources ser user memory and cpu guarantees/limits modifying user storage type and size expanding and contracting the size of your cluster - user storage in jupyterhub + Customizing User Storage how can this process break down configuration torn off per-user persistent storage - user management + Customizing User Management culling user pods admin users authenticating users \ No newline at end of file diff --git a/doc/source/advanced.md b/doc/source/advanced.md index df022ebf95..926aafba2d 100644 --- a/doc/source/advanced.md +++ b/doc/source/advanced.md @@ -270,7 +270,7 @@ is added to the cluster. By enabling the **continuous pre-puller** (default state is disabled), the user image will be pre-pulled when adding a new node. When enabled, the **continuous pre-puller** runs as a [daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) -to force kubernetes to pull the user image on all nodes as soon as a node is +to force Kubernetes to pull the user image on all nodes as soon as a node is present. The continuous pre-puller uses minimal resources on all nodes and greatly speeds up the user pod start time. diff --git a/doc/source/amazon/efs_storage.rst b/doc/source/amazon/efs_storage.rst index 2ad17846a0..73b9c875ea 100644 --- a/doc/source/amazon/efs_storage.rst +++ b/doc/source/amazon/efs_storage.rst @@ -7,7 +7,7 @@ ElasticFileSystem is distributed file system which speaks the NFS protocol. It Drawbacks: -* Setting permissions on persistent volumes is not nailed down in the kubernetes spec yet. This adds some complications we will discuss later. +* Setting permissions on persistent volumes is not nailed down in the Kubernetes spec yet. This adds some complications we will discuss later. * A crafty user may be able to contact the EFS server directly and read other user's files depending on how the system is setup. 
diff --git a/doc/source/create-k8s-cluster.rst b/doc/source/create-k8s-cluster.rst index 12eedb5f28..238322de00 100644 --- a/doc/source/create-k8s-cluster.rst +++ b/doc/source/create-k8s-cluster.rst @@ -1,12 +1,14 @@ .. _create-k8s-cluster: -Creating your Kubernetes Cluster -============================= +Setup a Kubernetes Cluster +========================== -Kubernetes' documentation describes the many `ways to set up a cluster`_. -Here, we shall provide quick instructions for the most painless and -popular ways of getting setup in various cloud providers and on other -infrastructure. Choose one option and proceed. +Kubernetes' documentation describes the many `ways to set up a cluster +`_. We provide quick +instructions for the most painless and popular ways of setting up a Kubernetes +cluster on various cloud providers and on other infrastructure. + +Choose one option and proceed. .. toctree:: :titlesonly: diff --git a/doc/source/extending-jupyterhub.rst b/doc/source/extending-jupyterhub.rst index 41918bf474..c71865dbeb 100644 --- a/doc/source/extending-jupyterhub.rst +++ b/doc/source/extending-jupyterhub.rst @@ -1,11 +1,11 @@ .. _extending-jupyterhub: -Extending your JupyterHub setup -=============================== +Customizing your Deployment +=========================== -The helm chart used to install JupyterHub has a lot of options for you to tweak. -For a semi-complete list of the changes you can apply via your helm-chart, -see the :ref:`helm-chart-configuration-reference`. +The Helm chart used to install your JupyterHub deployment has a lot of options +for you to tweak. For a semi-complete reference list of the options, see the +:ref:`helm-chart-configuration-reference`. .. _apply-config-changes: @@ -14,21 +14,25 @@ Applying configuration changes The general method to modify your Kubernetes deployment is to: -1. Make a change to your ``config.yaml`` +1. Make a change to your ``config.yaml``. + 2. Run a ``helm upgrade``: - .. code-block:: bash + .. 
code-block:: bash - helm upgrade jupyterhub/jupyterhub --version=0.7 --values config.yaml + helm upgrade jupyterhub/jupyterhub \ + --version=0.7 \ + --values config.yaml - Where ```` is the parameter you passed to ``--name`` when - `installing jupyterhub `_ with - ``helm install``. If you don't remember it, you can probably find it by doing - ``helm list``. -3. Wait for the upgrade to finish, and make sure that when you do - ``kubectl --namespace= get pod`` the hub and proxy pods are - in ``Ready`` state. Your configuration change has been applied! + Note that ``helm list`` should display ```` if you forgot it. -For information about the many things you can customize with changes to -your helm chart, see :ref:`user-environment`, :ref:`user-resources`, and -:ref:`helm-chart-configuration-reference`. +3. Verify that the *hub* and *proxy* pods entered the ``Running`` state after + the upgrade completed. + + .. code-block:: bash + + kubectl --namespace= get pod + +For information about the many things you can customize with changes to your +Helm chart through values provided to its templates through ``config.yaml``, see +the :ref:`customization-guide`. diff --git a/doc/source/getting-started.rst b/doc/source/getting-started.rst index e2f0ed2c3b..77d98a1300 100644 --- a/doc/source/getting-started.rst +++ b/doc/source/getting-started.rst @@ -1,6 +1,6 @@ .. _getting-started: -Getting started +Getting Started =============== **JupyterHub** lets you create custom computing environments that users can @@ -21,7 +21,7 @@ And may end up gaining experience with: .. note:: - For a more extensive description of the tools and services that JupyterHub + For a more elaborate introduction to the tools and services that JupyterHub depends upon, see our :ref:`tools` page. @@ -29,12 +29,8 @@ Verify JupyterHub dependencies ------------------------------ At this point, you should have completed *Step Zero* and have an operational -Kubernetes cluster. 
You will already have a cloud provider/infrastructure and -kubernetes. +Kubernetes cluster made available through a cloud provider/infrastructure. If +not, see :ref:`create-k8s-cluster`. -If you need to create a Kubernetes cluster, see -:ref:`create-k8s-cluster`. - -We also depend on Helm and the JupyterHub Helm chart for your JupyterHub -deployment. We'll deploy them in this section. Let's begin by moving on to -:ref:`setup-helm`. +You will use Helm and the JupyterHub Helm chart for your JupyterHub deployment. +Let's get started by moving on to :ref:`setup-helm`. diff --git a/doc/source/google/step-zero-gcp.rst b/doc/source/google/step-zero-gcp.rst index 128a0c6dc5..6e20d04149 100644 --- a/doc/source/google/step-zero-gcp.rst +++ b/doc/source/google/step-zero-gcp.rst @@ -43,10 +43,11 @@ your google cloud account. b. **Use your own computer's terminal:** 1. Download and install the `gcloud` command line tool at its `downloads - page `_. + page `_. It will help you + create and communicate with a Kubernetes cluster. - 2. Install ``kubectl`` (read *kube control*), it is a tool for controlling - kubernetes. From your terminal, enter: + 2. Install ``kubectl`` (reads *kube control*), it is a tool for controlling + Kubernetes clusters in general. From your terminal, enter: .. code-block:: bash @@ -69,8 +70,8 @@ your google cloud account. A single node from the default node pool created below will be responsible for running the essential pods of the JupyterHub chart. We recommend choosing - a cheap machine type like `n1-standard-1` initially and upgrade it at a later - stage if it is found to be overburdened. + a cheap machine type like `n1-standard-1` initially and upgrading it at a + later stage if it is found to be overburdened. See the `node pool documentation `_ for @@ -85,8 +86,8 @@ your google cloud account. --node-labels hub.jupyter.org/node-purpose=core * ``--machine-type`` specifies the amount of CPU and RAM in each node within - this default node pool. 
There is a `variety of types `_ - to choose from. + this default node pool. There is a `variety of types + `_ to choose from. * ``--num-nodes`` specifies how many nodes to spin up. @@ -97,14 +98,14 @@ your google cloud account. means that the amount of nodes is automatically adjusted along with the amount of users scheduled. - The `n1-standard-2` machine type has 2CPUs and 7.5G of RAM each of which - about 0.2 CPU will be requested by system pods. It is a suitable choice for - a free account that has a limit on a total of 8 CPU cores. + The `n1-standard-2` machine type has 2 CPUs and 7.5 GB of RAM each of which + about 0.2 CPU will be requested by system pods. It is a suitable choice for a + free account that has a limit on a total of 8 CPU cores. Note that the node pool is *tainted*. Only user pods that is configured with a *toleration* for this taint can schedule on the node pool's nodes. This is done in order to ensure the autoscaler will be able to scale down - when the users have left. + when the user pods have stopped. .. code-block:: bash diff --git a/doc/source/index.rst b/doc/source/index.rst index f1eaa25c92..0ab8a8ff6f 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,22 +21,22 @@ page`_. .. _getting-to-zero: -Creating your Kubernetes cluster ---------------------------------------------- +Setup a Kubernetes cluster +-------------------------- This section describes a Kubernetes cluster and outlines how to complete *Step Zero: your Kubernetes cluster* for different cloud providers and infrastructure. .. toctree:: :titlesonly: - :caption: Creating your Kubernetes cluster + :caption: Setup a Kubernetes cluster create-k8s-cluster .. _creating-your-jupyterhub: -Creating your JupyterHub ------------------------- +Setup JupyterHub +---------------- This tutorial starts from *Step Zero: your Kubernetes cluster* and describes the steps needed for you to create a complete initial JupyterHub deployment. @@ -45,7 +45,7 @@ an initial deployment. .. 
toctree:: :maxdepth: 1 - :caption: Creating your JupyterHub + :caption: Setup JupyterHub getting-started setup-helm diff --git a/doc/source/microsoft/step-zero-azure.rst b/doc/source/microsoft/step-zero-azure.rst index a160292fad..faa806bfdf 100644 --- a/doc/source/microsoft/step-zero-azure.rst +++ b/doc/source/microsoft/step-zero-azure.rst @@ -150,7 +150,7 @@ Step Zero: Kubernetes on Microsoft Azure Container Service (AKS) * ``--name`` is the name you want to use to refer to your cluster * ``--resource-group`` is the ResourceGroup you created in step 4 * ``--ssh-key-value`` is the ssh public key created in step 7 - * ``--node-count`` is the number of nodes you want in your kubernetes cluster + * ``--node-count`` is the number of nodes you want in your Kubernetes cluster * ``--node-vm-size`` is the size of the nodes you want to use, which varies based on what you are using your cluster for and how much RAM/CPU each of your users need. There is a `list of all possible node sizes `_ @@ -188,7 +188,7 @@ Step Zero: Kubernetes on Microsoft Azure Container Service (AKS) kubectl get node - The response should list three running nodes and their kubernetes versions! + The response should list three running nodes and their Kubernetes versions! Each node should have the status of ``Ready``, note that this may take a few moments. diff --git a/doc/source/optimization.md b/doc/source/optimization.md index 118d301f6d..81c3b08b9c 100644 --- a/doc/source/optimization.md +++ b/doc/source/optimization.md @@ -34,7 +34,7 @@ container image will be pre-pulled when a new node is added. New nodes can for example be added manually or by a cluster autoscaler. The **continuous pre-puller** uses a [daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) -to force kubernetes to pull the user image on all nodes as soon as a node is +to force Kubernetes to pull the user image on all nodes as soon as a node is present. 
The continuous pre-puller uses minimal resources on all nodes and greatly speeds up the user pod start time. diff --git a/doc/source/reference.txt b/doc/source/reference.txt index f7044f32ec..a38ff82793 100644 --- a/doc/source/reference.txt +++ b/doc/source/reference.txt @@ -3,11 +3,14 @@ DO NOT EDIT THIS LINE. This file is used in `conf.py to generate schema.md`. Edi .. _helm-chart-configuration-reference: ``` -# Helm Chart Configuration Reference +# Configuration Reference -The [JupyterHub helm chart](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) is configurable so that you can customize your JupyterHub setup however you'd like. You can extend user resources, build off of different Docker images, manage security and authentication, and more. +The [JupyterHub Helm chart](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) +is configurable by values in `config.yaml`. This means that you can customize +your JupyterHub deployment in many ways. You can extend user resources, build +off of different Docker images, manage security and authentication, and more. -Below is a description of the fields that are exposed with the JupyterHub helm chart. -For more guided information about some specific things you can do with -modifications to the helm chart, see the [extending jupyterhub](extending-jupyterhub.html) -and [user environment](user-environment.html) pages. +Below is a description of the fields that are exposed with the JupyterHub helm +chart. For more guided information about some specific things you can do with +modifications to the helm chart, see the [extending jupyterhub](extending-jupyterhub.html) +and [user environment](user-environment.html) pages. 
diff --git a/doc/source/security.md b/doc/source/security.md index 162084c79b..f0502ff500 100644 --- a/doc/source/security.md +++ b/doc/source/security.md @@ -191,7 +191,7 @@ users to grant themselves more privileges, access other users' content without permission, run (unprofitable) bitcoin mining operations & various other not-legitimate activities. By default, we do not allow access to the [service account credentials](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) needed -to access the kubernetes API from user servers for this reason. +to access the Kubernetes API from user servers for this reason. If you want to (carefully!) give access to the Kubernetes API to your users, you can do so with the following in your `config.yaml`: diff --git a/doc/source/setup-helm.rst b/doc/source/setup-helm.rst index 72f1019d00..0ce34b7ee8 100644 --- a/doc/source/setup-helm.rst +++ b/doc/source/setup-helm.rst @@ -5,15 +5,14 @@ Setting up Helm `Helm `_, the package manager for Kubernetes, is a useful tool for: installing, upgrading and managing applications on a Kubernetes cluster. -The Helm packages are called *charts*. We will be install and manage JupyterHub -on our kubernetes cluster with a Helm chart. +Helm packages are called *charts*. We will install and manage JupyterHub on +our Kubernetes cluster with a Helm chart. Helm has two parts: a client (`helm`) and a server (`tiller`). Tiller runs -inside of your Kubernetes cluster as a pod in the kube-system namespace and +inside of your Kubernetes cluster as a pod in the kube-system namespace. Tiller manages *releases* (installations) and *revisions* (versions) of charts deployed -on the kubernetes cluster. When you run `helm` commands, your local Helm client -sends instructions to `tiller` in the cluster that in turn make the requested -changes. +on the cluster. 
When you run `helm` commands, your local Helm client sends +instructions to `tiller` in the cluster that in turn make the requested changes. Installation ------------ @@ -25,8 +24,9 @@ terminal: curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash -`Alternative methods for helm installation `_ -exist if you prefer to install without using the script. +`Alternative methods for helm installation +`_ exist if you +prefer or need to install without using the script. .. _helm-rbac: @@ -50,14 +50,13 @@ cluster: kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller - See the `RBAC documentation - `_ for more - information. + See `our RBAC documentation + `_ for more information. .. note:: While most clusters have RBAC enabled and you need this line, you **must** - skip this step if your kubernetes cluster does not have RBAC enabled. + skip this step if your Kubernetes cluster does not have RBAC enabled. 3. Initialize `helm` and `tiller`. @@ -120,5 +119,4 @@ Ensure that `tiller is secure `! +Congratulations, Helm is now set up! Let's continue with :ref:`setup-jupyterhub`! diff --git a/doc/source/setup-jupyterhub.rst b/doc/source/setup-jupyterhub.rst index 9584e1d5ac..61ae1c8514 100644 --- a/doc/source/setup-jupyterhub.rst +++ b/doc/source/setup-jupyterhub.rst @@ -12,8 +12,8 @@ Prepare configuration file This step prepares a `YAML `_ configuration file (`config.yaml`) for the generic JupyterHub deployment declared by the Helm -chart. Helm charts contains templates for kubernetes resources to be installed -in a kubernetes cluster. This config file will provide values to be used by +chart. Helm charts contains templates for Kubernetes resources to be installed +in a Kubernetes cluster. This config file will provide values to be used by these templates. It's important to save the config file in a safe place. 
The config file is @@ -43,31 +43,7 @@ For the following steps, use your favorite code editor. We'll use the .. code-block:: yaml proxy: - secretToken: "" - - singleuser: - defaultUrl: - cpu: - limit: - guarantee: - memory: - limit: - guarantee: 1G - - scheduling: - podPriority: - enabled: true - userPlaceholder: - replicas: 0 - userDummy: - replicas: 0 - corePods: - nodeAffinity: - matchNodePurpose: "require" - userPods: - nodeAffinity: - matchNodePurpose: "require" - + secretToken: "" .. Don't put an example here! People will just copy paste that & that's a security issue. @@ -101,47 +77,49 @@ Install JupyterHub .. code:: bash - helm upgrade --install jupyterhub/jupyterhub \ - --version v0.7 \ - --name jh \ - --namespace jh \ - --values config.yaml + helm upgrade jupyterhub/jupyterhub \ + --install \ + --namespace \ + --version v0.7 \ + --values config.yaml where: - - ``--name`` refers to a `Helm release name + - ```` refers to a `Helm release name `_, an identifier used to differentiate chart installations. You need it when you are changing or - deleting the configuration of this chart installation. If your kubernetes + deleting the configuration of this chart installation. If your Kubernetes cluster will contain multiple JupyterHubs make sure to differentiate them. You can list Helm releases with ``helm list``. - - ``--namespace`` refers to a `Kubernetes namespace + - ```` refers to a `Kubernetes namespace `_, an identifier used to group Kubernetes resources, in this case all - kubernetes resources associated with the JupyterHub chart. You'll need the + Kubernetes resources associated with the JupyterHub chart. You'll need the namespace identifier for performing any commands with ``kubectl``. - We recommend providing the same value to ``--name`` and ``--namespace`` - for now to avoid too much confusion, but advanced users of Kubernetes and - helm should feel free to use different values. 
+ We recommend providing the same value (*jh* for example) to + ```` and ```` for now to avoid too much + confusion, but advanced users of Kubernetes and helm should feel free to use + different values. .. note:: * This step may take a moment, during which time there will be no output to your terminal. JupyterHub is being installed in the background. - * If you get a ``release named already exists`` error, then - you should delete the release by running - ``helm delete --purge ``. Then reinstall by repeating this - step. If it persists, also do ``kubectl delete `` and try again. + * If you get a ``release named already exists`` error, + then you should delete the release by running ``helm delete --purge + ``. Then reinstall by repeating this step. If it + persists, also do ``kubectl delete namespace `` and try + again. * In general, if something goes *wrong* with the install step, delete the Helm release by running ``helm delete --purge `` before re-running the install command. * If you're pulling from a large Docker image you may get a - ``Error: timed out waiting for the condition`` error, - add a ``--timeout=SOME-LARGE-NUMBER`` - parameter to the ``helm install`` command. + ``Error: timed out waiting for the condition`` error, add a + ``--timeout=SOME-LARGE-NUMBER-OF-SECONDS`` parameter to the ``helm + install`` command. * The ``--version`` parameter corresponds to the *version of the helm chart*, not the version of JupyterHub. Each version of the JupyterHub helm chart @@ -190,7 +168,11 @@ Install JupyterHub to a browser. JupyterHub is running with a default *dummy* authenticator so entering any username and password combination will let you enter the hub. -Congratulations! Now that you have JupyterHub running, you can `extend it -`_ in many ways. You can use a pre-built image for the -user container, build your own image, configure different authenticators, and -more! +Congratulations! 
Now that you have basic JupyterHub running, you can `extend it +`_ and `optimize it `_ in many +ways to meet your needs. + +* Configure the login to use the account that makes sense to you (Google, GitHub, etc.). +* Use a suitable pre-built image for the user container or build your own. +* Host it on https://your-domain.com. +* ... diff --git a/doc/source/tips.rst b/doc/source/tips.rst index b891c5b95c..f7a62d3722 100644 --- a/doc/source/tips.rst +++ b/doc/source/tips.rst @@ -29,7 +29,7 @@ Managing ``kubectl`` contexts Oftentimes people manage multiple Kubernetes deployments at the same time. ``kubectl`` handles this with the idea of "contexts", which specify which -kubernetes deployment you are referring to when you type ``kubectl get XXX``. +Kubernetes deployment you are referring to when you type ``kubectl get XXX``. To see a list of contexts currently available to you, use the following command: @@ -86,7 +86,7 @@ Asking for a more verbose or structured output Sometimes the information that's in the default output for ``kubectl get `` is not enough for your needs, or isn't structured the way you'd like. We -recommend looking into the different kubernetes output options, which can be +recommend looking into the different Kubernetes output options, which can be modified like so: .. code-block:: bash diff --git a/doc/source/tools.rst b/doc/source/tools.rst index 66d048c6fe..128c9eeec9 100644 --- a/doc/source/tools.rst +++ b/doc/source/tools.rst @@ -91,7 +91,7 @@ of your cluster deployment, and allows a user to specify the computational requirements that they need (e.g., how many machines, how many CPUs per machine, how much RAM). Then, it handles the resources on the cluster and ensures that these resources are always available. If something goes down, -kubernetes will try to automatically bring it back up. +Kubernetes will try to automatically bring it back up. Kubernetes can only manage the computing resources that it is given. 
This means that it generally can **not** create new resources on its @@ -137,7 +137,7 @@ For more information about pods, see the Deployments *********** -A deployment is a collection of pods on kubernetes. It is how kubernetes +A deployment is a collection of pods on Kubernetes. It is how Kubernetes knows exactly what containers and what machines need to be running at all times. For example, if you have two pods: one that does the authenticating described above, and another that manages a database, you can specify both @@ -202,23 +202,23 @@ For more information on Persistent Volume Claims, see the Helm ---- -`Helm `_ is a way of specifying kubernetes objects +`Helm `_ is a way of specifying Kubernetes objects with a standard template. Charts ****** -The way that Helm controls kubernetes is with templates of structured +The way that Helm controls Kubernetes is with templates of structured information that specify some computational requirements. These templates are called "charts", or "helm charts". They contain -all of the necessary information for kubernetes to generate: +all of the necessary information for Kubernetes to generate: - a deployment object - a service object - a persistent volume object for a deployment. - collections of the above components -They can be installed into a namespace, which causes kubernetes to +They can be installed into a namespace, which causes Kubernetes to begin deploying the objects above into that namespace. Charts have both names and versions, which means that you can easily @@ -235,7 +235,7 @@ A release is basically a specific instantiation of a helmchart inserted into a particular namespace. If you'd like to upgrade your kubernetes deployment (say, by changing the amount of RAM that each user should get), then you can change the helm chart, then re-deploy -it to your kubernetes cluster. This generates a new version of the release. +it to your Kubernetes cluster. This generates a new version of the release. 
JupyterHub @@ -243,7 +243,7 @@ JupyterHub JupyterHub is a way of utilizing the components above in order to provide computational environments that users can access remotely. -It exists as two kubernetes deployments, Proxy and Hub, each of which has +It exists as two Kubernetes deployments, Proxy and Hub, each of which has one pod. Each deployment accomplishes some task that, together, make up JupyterHub. Finally, the output of JupyterHub is a user pod, which specifies the computational environment in which a single user will operate. So @@ -277,9 +277,9 @@ Receives traffic from the proxy pod. It has 3 main running processes: 1. An authenticator, which can verify a user's account. It also contains a process. -2. A "KubeSpawner" that talks to the kubernetes API and tells it to spawn +2. A "KubeSpawner" that talks to the Kubernetes API and tells it to spawn pods for users if one doesn't already exist. KubeSpawner will tell - kubernetes to create a pod for a new user, then it will tell the + Kubernetes to create a pod for a new user, then it will tell the the Proxy Pod that the user’s pod has been created. 3. An admin panel that has information about who has pods created, and what kind of usage exists on the cluster. diff --git a/doc/source/troubleshooting.rst b/doc/source/troubleshooting.rst index d85be3375f..e68632210d 100644 --- a/doc/source/troubleshooting.rst +++ b/doc/source/troubleshooting.rst @@ -9,10 +9,10 @@ For information on debugging Kubernetes, see :ref:`debug`. I thought I had deleted my cloud resources, but they still show up. Why? ------------------------------------------------------------------------ -You probably deleted the specific nodes, but not the kubernetes cluster that +You probably deleted the specific nodes, but not the Kubernetes cluster that was controlling those nodes. Kubernetes is designed to make sure that a specific set of resources is available at all times. 
This means that if you -only delete the nodes, but not the kubernetes instance, then it will detect +only delete the nodes, but not the Kubernetes instance, then it will detect the loss of computers and will create two new nodes to compensate. How does billing for this work? diff --git a/doc/source/turn-off.rst b/doc/source/turn-off.rst index 42b2c12a45..fed2b4601e 100644 --- a/doc/source/turn-off.rst +++ b/doc/source/turn-off.rst @@ -1,40 +1,43 @@ .. _turn-off: -Turning Off JupyterHub and Computational Resources -================================================== +Tearing Everything Down +======================= When you are done with your hub, you should delete it so you are no longer paying money for it. The following sections describe how to delete your -JupyterHub resources on various cloud providers. +JupyterHub deployment and associated cloud resources on various cloud providers. Tearing down your JupyterHub entails: -1. Deleting your Kubernetes namespace, which deletes all objects created and managed by Kubernetes -2. Deleting any computational resources you've requested from the cloud provider -3. Running a final check to make sure there aren't any lingering resources that haven't been deleted - (e.g., storage volumes in some cloud providers) +1. Deleting your Kubernetes namespace, which deletes all objects created and + managed by Kubernetes in it. + +2. Deleting any cloud resources you've requested from the cloud provider. + +3. Running a final check to make sure there aren't any lingering resources that + haven't been deleted (e.g., storage volumes in some cloud providers). For all cloud providers ----------------------- .. _delete-namespace: -Delete the helm namespace -~~~~~~~~~~~~~~~~~~~~~~~~~ +Delete the helm release +~~~~~~~~~~~~~~~~~~~~~~~ The steps in this section must be performed for all cloud providers first, before doing the cloud provider specific setup. -1. First, delete the helm release. 
This deletes all resources that were created - by helm to make your jupyterhub. +1. First, delete the Helm release. This deletes all resources that were created + by Helm for your JupyterHub deployment. .. code-block:: bash helm delete --purge -2. Next, delete the namespace the hub was installed in. This deletes any disks - that may have been created to store user's data, and any IP addresses that - may have been provisioned. +2. Next, delete the Kubernetes namespace the hub was installed in. This deletes + any disks that may have been created to store user's data, and any IP + addresses that may have been provisioned. .. code-block:: bash @@ -43,10 +46,11 @@ before doing the cloud provider specific setup. Google Cloud Platform --------------------- -1. Perform the steps in :ref:`delete-namespace`. These cloud provider agnostic steps will - delete the helm chart and delete the hub's namespace. This must be done before proceeding. +1. Perform the steps in :ref:`delete-namespace`. These cloud provider agnostic + steps will delete the Helm release and the Kubernetes namespace. This must be + done before proceeding. -2. Delete the kubernetes cluster. You can list all the clusters you have. +2. Delete the Kubernetes cluster. You can list all the clusters you have. .. code-block:: bash @@ -66,7 +70,7 @@ Google Cloud Platform At a minimum, check the following under the Hamburger (left top corner) menu: 1. Compute -> Compute Engine -> Disks - 2. Compute -> Kubernetes Engine -> Container Clusters + 2. Compute -> Kubernetes Engine -> Clusters 3. Tools -> Container Registry -> Images 4. Networking -> Network Services -> Load Balancing @@ -76,8 +80,9 @@ Google Cloud Platform Microsoft Azure AKS ------------------- -1. Perform the steps in :ref:`delete-namespace`. These cloud provider agnostic steps will - delete the helm chart and delete the hub's namespace. This must be done before proceeding. +1. Perform the steps in :ref:`delete-namespace`. 
These cloud provider agnostic + steps will delete the Helm release and the Kubernetes namespace. This must be + done before proceeding. 2. Delete your resource group. You can list your active resource groups with the following command @@ -105,29 +110,30 @@ Microsoft Azure AKS Amazon Web Services (AWS) ------------------------- -1. Perform the steps in :ref:`delete-namespace`. These cloud provider agnostic steps will - delete the helm chart and delete the hub's namespace. This must be done before proceeding. +1. Perform the steps in :ref:`delete-namespace`. These cloud provider agnostic + steps will delete the Helm release and the Kubernetes namespace. This must be + done before proceeding. 2. on CI host: .. code-block:: bash kops delete cluster --yes - exit #(leave CI host) - Terminicate CI Host - aws ec2 stop-instances --instance-ids - aws ec2 terminate-instances --instance-ids - -.. note:: - cluster name was set as an environment var aka: `NAME=.k8s.local` - Stopping the CI host will still incur disk storage and IP address costs, but - the host can be restarted at a later date to resume using. + # Leave CI host + exit + + # Terminate CI host + aws ec2 stop-instances --instance-ids + aws ec2 terminate-instances --instance-ids +.. note:: + * ```` should be ``.k8s.local``. -.. note:: + * Stopping the CI host will still incur disk storage and IP address costs, + but the host can be restarted at a later date. - Sometimes AWS fails to delete parts of the stack on a first pass. Be sure - to double-check that your stack has in fact been deleted, and re-perform - the actions above if needed. + * Sometimes AWS fails to delete parts of the stack on a first pass. Be sure + to double-check that your stack has in fact been deleted, and re-perform + the actions above if needed. 
diff --git a/doc/source/upgrading.md b/doc/source/upgrading.md index 406a7efb11..25b0a273a3 100644 --- a/doc/source/upgrading.md +++ b/doc/source/upgrading.md @@ -1,4 +1,4 @@ -# Upgrading your JupyterHub Kubernetes deployment +# Upgrading your Helm chart This page covers best-practices in upgrading your JupyterHub deployment via updates to the Helm Chart. diff --git a/doc/source/user-environment.rst b/doc/source/user-environment.rst index 81c6c7f673..f71b21209a 100644 --- a/doc/source/user-environment.rst +++ b/doc/source/user-environment.rst @@ -1,55 +1,46 @@ .. _user-environment: -Customizing the User Environment -================================ +Customizing User Environment +============================ .. note:: - For a list of all the options you can configure with your helm - chart, see the :ref:`helm-chart-configuration-reference`. + For a list of all the options you can configure with your Helm chart, see the + :ref:`helm-chart-configuration-reference`. -This page contains instructions for a few common ways you can extend the -user experience for your kubernetes deployment. +This page contains instructions for a few common ways you can enhance the user +experience for the users of your JupyterHub deployment. -The **user environment** is the set of packages, environment variables, and -various files that are present when the user logs into JupyterHub. The user may -also see different tools that provide interfaces to perform specialized tasks, -such as RStudio, RISE, JupyterLab, and others. +The *user environment* is the set of software packages, environment variables, +and various files that are present when the user logs into JupyterHub. The user +may also see different tools that provide interfaces to perform specialized +tasks, such as RStudio, JupyterLab, RISE and others. -Usually a :term:`docker image` specifies the functionality and -environment that you wish to provide to users. 
The following sections will describe -how to use existing Docker images, how to create custom images, and how to set -environment variables. +A :term:`docker image` built from a ``Dockerfile`` will lay the foundation for +the environment that you will provide for the users. The following sections will +describe how to find and use existing Docker images, how to build custom images, +and how to set environment variables. .. _existing-docker-image: Use an existing Docker image ---------------------------- -.. note:: - - The Docker image you are using must have the ``jupyterhub`` package - installed in order to work. Moreover, the version of ``jupyterhub`` must - match the version installed by the helm chart that you're using. For example, - ``v0.6`` of the helm chart uses ``jupyterhub==0.8.1``. - -.. note:: - - You can find the configuration for the default Docker image used in this - guide `here `_. - -Using an existing Docker image, that someone else has written and maintained, -is the simplest approach. For example, Project Jupyter maintains the -`jupyter/docker-stacks `_ repo, -which contains ready to use Docker images. Each image includes a set of -commonly used science and data science libraries and tools. +Using an existing Docker image that someone else has built and maintained is +the simplest approach. For example, Project Jupyter maintains the +`jupyter/docker-stacks `_ repo, which +contains ready to use Docker images. Each image includes a set of commonly used +science and data science libraries and tools. -The `scipy-notebook `_ -image, which can be found in the ``docker-stacks`` repo, contains -`useful scientific programming libraries +The `scipy-notebook `_ image, +for example, which can be found in the `docker-stacks +`_ repo, contains `useful scientific +programming libraries `_ -pre-installed. This image may satisfy your needs. If you wish to use an -existing image, such as the ``scipy-notebook`` image, complete these steps: +pre-installed. 
This image may satisfy your needs. + +If you wish to use an existing image, such as the ``scipy-notebook`` image, +complete these steps: 1. Modify your ``config.yaml`` file to specify the image. For example: @@ -58,43 +49,48 @@ existing image, such as the ``scipy-notebook`` image, complete these steps: singleuser: image: name: jupyter/scipy-notebook - tag: c7fb6660d096 + tag: 135a595d2a93 .. note:: Container image names cannot be longer than 63 characters. - Always use an explicit ``tag``, such as a specific commit. - - Avoid using ``latest``. Using ``latest`` might cause a several minute - delay, confusion, or failures for users when a new version of the image - is released. + Always use an explicit ``tag``, such as a specific commit. Avoid using + ``latest``. Using ``latest`` might cause a several minute delay, + confusion, or failures for users when a new version of the image is + released. 2. Apply the changes by following the directions listed in - `apply the changes`_. These directions will **pre-pull** the image to all - the nodes in your cluster. This process may take several minutes to - complete. - -.. note:: - - Docker images must have the ``jupyterhub`` package installed within them to - be used in this manner. + `apply the changes`_. If you have *prePuller.hook.enabled* set to true, all + the nodes in your cluster will pull the image before the actual upgrade of + the hub starts. This process may take several minutes to complete. .. _r2d-custom-image: Build a custom Docker image with ``repo2docker`` ------------------------------------------------ -If you can't find a pre-existing image that suits your needs, you can -create your own image. The easiest way to do this is with the package +.. note:: + + Docker images to be used this way must have a version of the ``jupyterhub`` + package matching the Helm chart. This documentation is for Helm chart + ``v0.7``, and it uses JupyterHub version ``0.9.1``. + +.. 
note:: + + You can find the configuration for the default Docker image used in this + guide `here `_. + +If you can't find a pre-existing image that suits your needs, you can create +your own image. The easiest way to do this is with the package :term:`repo2docker`. .. note:: `repo2docker `_ lets you quickly - convert a GitHub repository into a Docker image that can be used as a base - for your JupyterHub instance. Anything inside the GitHub repository - will exist in a user’s environment when they join your JupyterHub: + convert a Git repository into a Docker image that can be used as a base for + your JupyterHub instance. Anything inside the Git repository will exist in a + user’s environment when they join your JupyterHub: - If you include a ``requirements.txt`` file in the root level of the repository, ``repo2docker`` will ``pip install`` the specified packages @@ -125,35 +121,35 @@ how to configure JupyterHub to build off of this image: pip install --user jupyter-repo2docker -3. **Create (or find) a GitHub repository you want to use.** This repo should - have all materials that you want your users to be able to use. You may want - to include a `pip`_ ``requirements.txt`` file to list packages, one per - file line, to install such as when using ``pip install``. Specify the - versions explicitly so the image is fully reproducible. An example - ``requirements.txt`` follows: +3. **Create (or find) a Git repository you want to use.** - .. code-block:: bash + This repo should have all materials that you want your users to be able to + use. You may want to include a `pip`_ ``requirements.txt`` file to list + packages, one per file line, to install such as when using ``pip install``. + Specify the versions explicitly so the image is fully reproducible. An + example ``requirements.txt`` follows: - jupyterhub==0.8.* - numpy==1.12.1 - scipy==0.19.0 - matplotlib==2.0 + .. 
code-block:: bash - As noted above, the requirements must include ``jupyterhub``, pinned to a - version compatible with the version of JupyterHub used by Helm chart. + jupyterhub==0.9.1 + numpy==1.14.3 + scipy==1.1.0 + matplotlib==2.2.2 4. **Use repo2docker to build a Docker image.** .. code-block:: bash - jupyter-repo2docker --user-name=jovyan --image=gcr.io//: --no-run + TODO: CONTINUE DOCUMENTATION REVIEW FROM HERE + + jupyter-repo2docker --user-name=jovyan --image=gcr.io//: --no-run - This tells ``repo2docker`` to fetch ``master`` of the GitHub repository, - and uses heuristics to build a docker image of it. + This tells ``repo2docker`` to fetch ``master`` of the Git repository, and + uses heuristics to build a Docker image of it. .. note:: - - The project name should match your google cloud project's name. + - The project name should match your Google cloud project's name. - Don’t use underscores in your image name. Other than this, the name can be anything memorable. *This bug with underscores will be fixed soon.* - The tag should be the first 6 characters of the SHA in the GitHub @@ -376,5 +372,5 @@ across sessions. To resolve this, take the following steps: folder, which will persist across sessions. .. _apply the changes: extending-jupyterhub.html#apply-config-changes -.. _downloading and installing Docker: https://store.docker.com/search?offering=community&platform=desktop%2Cserver&q=&type=edition +.. _downloading and installing Docker: https://www.docker.com/community-edition .. _pip: https://pip.readthedocs.io/en/latest/user_guide/#requirements-files diff --git a/doc/source/user-management.md b/doc/source/user-management.md index 10f1fdc428..754125d3e8 100644 --- a/doc/source/user-management.md +++ b/doc/source/user-management.md @@ -1,4 +1,4 @@ -# User Management +# Customizing User Management This section describes management of users and their permissions on JupyterHub. 
diff --git a/doc/source/user-resources.rst b/doc/source/user-resources.rst index 41c655d08e..264fb0019e 100644 --- a/doc/source/user-resources.rst +++ b/doc/source/user-resources.rst @@ -1,7 +1,7 @@ .. _user-resources: -User Resources -============== +Customizing User Resources +========================== .. note:: diff --git a/doc/source/user-storage.md b/doc/source/user-storage.md index 3456adac25..3c5908af5a 100644 --- a/doc/source/user-storage.md +++ b/doc/source/user-storage.md @@ -2,8 +2,7 @@ .. _user-storage: ``` -# User storage in JupyterHub - +# Customizing User Storage For the purposes of this guide, we'll describe "storage" as a "volume" - a location on a disk where a user's data resides. diff --git a/jupyterhub/schema.yaml b/jupyterhub/schema.yaml index 23943086fa..9cbbddaaed 100644 --- a/jupyterhub/schema.yaml +++ b/jupyterhub/schema.yaml @@ -33,7 +33,7 @@ properties: description: | Set the imagePullPolicy on the hub pod. - See [the kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images) + See [the Kubernetes docs](https://kubernetes.io/docs/concepts/containers/images/#updating-images) for more info on what the values mean. image: type: object @@ -166,7 +166,7 @@ properties: description: | Extra labels to add to the hub pod. - See [the kubernetes documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + See [the Kubernetes documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to learn more about labels. extraEnv: type: list @@ -417,18 +417,18 @@ properties: - string - "null" description: | - The tag will default to the clusters kubernetes version. + The tag will default to the cluster's Kubernetes version. 
- https://console.cloud.google.com/gcr/images/google-containers/GLOBAL/kube-scheduler-amd64 podPriority: type: object description: | - Generally available since kubernetes 1.11, Pod Priority is used to + Generally available since Kubernetes 1.11, Pod Priority is used to allow real users evict placeholder pods. properties: enabled: type: bool description: | - Generally available since kubernetes 1.11, Pod Priority is used to + Generally available since Kubernetes 1.11, Pod Priority is used to allow real users evict placeholder pods. userPlaceholder: type: object diff --git a/jupyterhub/values.yaml b/jupyterhub/values.yaml index 8b4073726e..c493f5ed67 100644 --- a/jupyterhub/values.yaml +++ b/jupyterhub/values.yaml @@ -214,7 +214,7 @@ scheduling: replicas: 1 image: name: gcr.io/google_containers/kube-scheduler-amd64 - tag: v1.11.1-beta.0 + tag: v1.11.1 podPriority: enabled: false userPlaceholder: @@ -224,6 +224,7 @@ scheduling: userDummy: enabled: true replicas: 0 + resources: corePods: nodeAffinity: matchNodePurpose: "prefer"