Anaconda3, Jupyter Notebook, OpenCV3, TensorFlow and Keras2 for Deep Learning
- Run with docker (image: `okwrtdsh/anaconda3:keras-cpu`)

```bash
$ docker run -v $(pwd):/src/notebooks -p 8888:8888 -td okwrtdsh/anaconda3:keras-cpu
```

- Open http://localhost:8888 in web browser
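If the notebook server asks for an authentication token (whether it does depends on how the image configures Jupyter), one way to find the login URL is to read the container logs. This is only a sketch; `<container-id>` is a placeholder taken from `docker ps`:

```bash
# Find the container started from this image, then print its startup log
$ docker ps --filter ancestor=okwrtdsh/anaconda3:keras-cpu
$ docker logs <container-id>
```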
- Run with nvidia-docker (image: `okwrtdsh/anaconda3:keras-10.0-cudnn7`)

```bash
$ nvidia-docker run -v $(pwd):/src/notebooks -p 8888:8888 -td okwrtdsh/anaconda3:keras-10.0-cudnn7
```

- Open http://localhost:8888 in web browser
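To confirm the GPU is visible inside the container, a quick check is to run `nvidia-smi` in it (a sketch, assuming the NVIDIA runtime injects `nvidia-smi` into the container, which it normally does; `<container-id>` comes from `docker ps`):

```bash
# Run nvidia-smi inside the running container to list visible GPUs
$ docker exec <container-id> nvidia-smi
```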
- docker-compose.yml (image: `okwrtdsh/anaconda3:keras-cpu`)

```yaml
version: '3'
services:
  jupyter:
    image: okwrtdsh/anaconda3:keras-cpu
    ports:
      - '8888:8888'
    volumes:
      - ./notebooks:/src/notebooks
```

- Run with docker-compose

```bash
$ docker-compose up -d
```

- Open http://localhost:8888 in web browser
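To inspect the server output or stop the stack once it is no longer needed, the standard docker-compose commands apply (run from the directory containing docker-compose.yml):

```bash
# Follow the logs of the jupyter service
$ docker-compose logs -f jupyter
# Stop and remove the containers defined in docker-compose.yml
$ docker-compose down
```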
- docker-compose.yml (image: `okwrtdsh/anaconda3:keras-10.0-cudnn7`)

```yaml
version: '3'
services:
  jupyter:
    image: okwrtdsh/anaconda3:keras-10.0-cudnn7
    ports:
      - '8888:8888'
    volumes:
      - ./notebooks:/src/notebooks
```

- Run with nvidia-docker

```bash
# Run with nvidia-docker-compose (nvidia-docker v1)
$ nvidia-docker-compose up -d
# Run with docker-compose (nvidia-docker v2)
$ docker-compose up -d
```

- Open http://localhost:8888 in web browser
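One way to check that TensorFlow can see the GPU from the compose-launched service is shown below. This is only a sketch: the service name `jupyter` matches the docker-compose.yml above, and `tf.test.is_gpu_available()` is the TensorFlow 1.x API, so the exact call may differ with the TensorFlow version actually shipped in the image.

```bash
# Ask TensorFlow inside the jupyter service whether a GPU is available
$ docker-compose exec jupyter python -c "import tensorflow as tf; print(tf.test.is_gpu_available())"
```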
Please note that PyTorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g. for multithreaded data loaders), the default shared memory segment size the container runs with is not enough. Increase the shared memory size with either the `--ipc=host` or the `--shm-size` command line option to `nvidia-docker run`.

```bash
$ nvidia-docker run --ipc=host -v $(pwd):/src/notebooks -p 8888:8888 -td okwrtdsh/anaconda3:pytorch-10.0-cudnn7
```
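Alternatively, an explicit shared memory size can be set with `--shm-size`; the `8g` value below is only an illustrative choice and should be sized to the data loaders actually in use:

```bash
# Same run command, but with a fixed shared memory segment instead of the host IPC namespace
$ nvidia-docker run --shm-size=8g -v $(pwd):/src/notebooks -p 8888:8888 -td okwrtdsh/anaconda3:pytorch-10.0-cudnn7
```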
- docker-compose.yml (image: `okwrtdsh/anaconda3:pytorch-10.0-cudnn7`)

```yaml
version: '3'
services:
  jupyter:
    image: okwrtdsh/anaconda3:pytorch-10.0-cudnn7
    ipc: host
    ports:
      - '8888:8888'
    volumes:
      - ./notebooks:/src/notebooks
```
```bash
# Run with nvidia-docker-compose (nvidia-docker v1)
$ nvidia-docker-compose up -d
# Run with docker-compose (nvidia-docker v2)
$ docker-compose up -d
```
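To verify that the larger shared memory segment is actually in effect inside the running container (the service name `jupyter` matches the compose file above), one simple check is:

```bash
# With ipc: host, /dev/shm inside the container should report the host's shared memory size
$ docker-compose exec jupyter df -h /dev/shm
```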