update gradio version (#868)
kramstrom authored Sep 17, 2024
1 parent ce48140 commit 7b5658d
Showing 3 changed files with 28 additions and 10 deletions.
06_gpu_and_ml/dreambooth/dreambooth_app.py: 2 changes (1 addition & 1 deletion)
@@ -55,7 +55,7 @@
     "accelerate==0.31.0",
     "datasets~=2.13.0",
     "ftfy~=6.1.0",
-    "gradio~=3.50.2",
+    "gradio~=4.29.0",
     "smart_open~=6.4.0",
     "transformers~=4.41.2",
     "sentencepiece>=0.1.91,!=0.1.92",
06_gpu_and_ml/vision_model_training.py: 23 changes (16 additions & 7 deletions)
@@ -27,15 +27,13 @@
 from typing import List, Optional, Tuple
 
 import modal
-from fastapi import FastAPI
 
-web_app = FastAPI()
 assets_path = pathlib.Path(__file__).parent / "vision_model_training" / "assets"
 app = modal.App(name="example-fastai-wandb-gradio-cifar10-demo")
 image = modal.Image.debian_slim(python_version="3.10").pip_install(
     "fastai~=2.7.9",
-    "gradio~=3.6.0",
-    "httpx~=0.23.0",
+    "gradio~=4.29.0",
+    "httpx~=0.24.1",
     # When using pip PyTorch is not automatically installed by fastai.
     "torch~=1.12.1",
     "torchvision~=0.13.1",
@@ -119,15 +117,15 @@ def download_dataset():
 # Fine-tuning the base ResNet model takes about 30-40 minutes on a GPU. To avoid
 # needing to keep our terminal active, we can run training as a 'detached run'.
 #
-# `MODAL_GPU=any modal run --detach vision_model_training.py::app.train`
+# `MODAL_GPU=any modal run --detach vision_model_training.py::train`
 #
 
 
 @app.function(
     image=image,
     gpu=USE_GPU,
     volumes={str(MODEL_CACHE): volume},
-    secrets=[modal.Secret.from_name("my-wandb-secret")],
+    secrets=[modal.Secret.from_name("wandb")],
     timeout=2700,  # 45 minutes
 )
 def train():
@@ -285,20 +283,31 @@ def create_demo_examples() -> List[str]:
     image=image,
     volumes={str(MODEL_CACHE): volume},
     mounts=[modal.Mount.from_local_dir(assets_path, remote_path="/assets")],
+    allow_concurrent_inputs=100,
+    concurrency_limit=1,
 )
 @modal.asgi_app()
 def fastapi_app():
     import gradio as gr
+    from fastapi import FastAPI
     from gradio.routes import mount_gradio_app
 
     classifier = ClassifierModel()
     interface = gr.Interface(
         fn=classifier.predict.remote,
-        inputs=gr.Image(shape=(224, 224)),
+        inputs=gr.Image(),
         outputs="label",
         examples=create_demo_examples(),
         css="/assets/index.css",
     )
+
+    def lifespan(app: FastAPI):
+        yield
+        print("closing interface")
+        interface.close()
+
+    web_app = FastAPI(lifespan=lifespan)
+
     return mount_gradio_app(
         app=web_app,
         blocks=interface,
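Taken together, these hunks switch the example to the Gradio 4 serving pattern: the FastAPI app is now created inside the Modal function with a lifespan hook, the Gradio interface is mounted onto it with mount_gradio_app, and the UI is pinned to a single container while still accepting many concurrent inputs. Below is a minimal self-contained sketch of that pattern; the app name, the image pin, and the classify stub are illustrative placeholders, not part of this commit.

import modal

app = modal.App(name="gradio-4-pattern-sketch")  # placeholder name

image = modal.Image.debian_slim(python_version="3.10").pip_install(
    "gradio~=4.29.0",  # gradio 4 pulls in fastapi as a dependency
)


@app.function(
    image=image,
    # Gradio keeps per-session state in the serving process, so pin the UI to
    # one container and let that container handle many inputs concurrently.
    allow_concurrent_inputs=100,
    concurrency_limit=1,
)
@modal.asgi_app()
def ui():
    import gradio as gr
    from fastapi import FastAPI
    from gradio.routes import mount_gradio_app

    def classify(img):
        # Stand-in for the example's remote model call (classifier.predict.remote).
        return {"airplane": 0.9, "automobile": 0.1}

    interface = gr.Interface(
        fn=classify,
        inputs=gr.Image(),  # gradio 4 dropped the shape= argument used in 3.x
        outputs="label",
    )

    def lifespan(app: FastAPI):
        yield  # requests are served while this generator is suspended
        interface.close()  # shut gradio down cleanly when the container exits

    web_app = FastAPI(lifespan=lifespan)
    return mount_gradio_app(app=web_app, blocks=interface, path="/")

The same container-pinning settings reappear in the LoRA example below.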
10_integrations/cloud_bucket_mount_loras.py: 13 changes (11 additions & 2 deletions)
@@ -242,11 +242,20 @@ def main(
 
 web_app = FastAPI()
 web_image = modal.Image.debian_slim().pip_install(
-    "gradio~=3.50.2", "pillow~=10.2.0"
+    "gradio~=4.29.0", "pillow~=10.2.0"
 )
 
 
-@app.function(image=web_image, keep_warm=1, container_idle_timeout=60 * 20)
+@app.function(
+    image=web_image,
+    keep_warm=1,
+    container_idle_timeout=60 * 20,
+    # gradio requires sticky sessions
+    # so we limit the number of concurrent containers to 1
+    # and allow it to scale to 100 concurrent inputs
+    allow_concurrent_inputs=100,
+    concurrency_limit=1,
+)
 @modal.asgi_app()
 def ui():
     """A simple Gradio interface around our LoRA inference."""
