diff --git a/06_gpu_and_ml/llm-serving/trtllm_llama.py b/06_gpu_and_ml/llm-serving/trtllm_llama.py index cd0282cc9..1fb053484 100644 --- a/06_gpu_and_ml/llm-serving/trtllm_llama.py +++ b/06_gpu_and_ml/llm-serving/trtllm_llama.py @@ -567,7 +567,7 @@ def main(): class GenerateRequest(pydantic.BaseModel): prompts: list[str] - settings: Optional[dict] + settings: Optional[dict] = None @app.function(image=web_image) diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference.py b/10_integrations/dbt_modal_inference/dbt_modal_inference.py new file mode 100644 index 000000000..d242756bd --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference.py @@ -0,0 +1,212 @@ +# # LLM inference within your data warehouse using dbt Python models +# +# In this example we demonstrate how you could combine [dbt's Python models](https://docs.getdbt.com/docs/build/python-models) +# with LLM inference powered by Modal, allowing you to run serverless GPU workloads within dbt. +# +# This example runs [dbt](https://docs.getdbt.com/docs/introduction) with a [DuckDB](https://duckdb.org) +# backend directly on top of Modal, but could be translated to run on any dbt-compatible +# database that supports Python models. Similarly, you could make these requests from UDFs +# directly in SQL instead if you don't want to use dbt's Python models. +# +# Here we use an LLM deployed in a previous example, [Serverless TensorRT-LLM (LLaMA 3 8B)](https://modal.com/docs/examples/trtllm_llama), +# but you could easily swap it for whichever Modal Function you wish. We use the LLM to classify the sentiment +# of free-text product reviews and aggregate the results in subsequent dbt SQL models. These product names, descriptions, and reviews +# were also generated by an LLM running on Modal! +# +# ## Configure Modal and dbt +# +# We set up the environment variables needed by dbt and +# create a slim Debian container image with the packages necessary to run dbt. + +import pathlib + +import modal + +LOCAL_DBT_PROJECT = ( # local path + pathlib.Path(__file__).parent / "dbt_modal_inference_proj" +) +PROJ_PATH = "/root/dbt" # remote paths +VOL_PATH = "/root/vol" +DB_PATH = f"{VOL_PATH}/db" +PROFILES_PATH = "/root/dbt_profile" +TARGET_PATH = f"{VOL_PATH}/target" + +# We also define the environment our application will run in -- +# a container image, similar to Docker. +# See [this guide](https://modal.com/docs/guide/custom-container) for details. + +dbt_image = ( # start from a slim Linux image + modal.Image.debian_slim() + .pip_install( # install Python packages + "dbt-duckdb==1.8.1", # dbt with duckdb connector + "pandas==2.2.2", # dataframes + "pyarrow==17.0.0", # columnar data lib + "requests==2.32.3", # http library + ) + .env( # configure dbt environment variables + { + "DBT_PROJECT_DIR": PROJ_PATH, + "DBT_PROFILES_DIR": PROFILES_PATH, + "DBT_TARGET_PATH": TARGET_PATH, + "DB_PATH": DB_PATH, + } + ) +) + +app = modal.App("duckdb-dbt-inference", image=dbt_image) + +# We mount the local code and configuration into the Modal Function +# so that they are available when we run dbt, +# and we create a Volume so that we can persist our data.
+ +dbt_project = modal.Mount.from_local_dir( + LOCAL_DBT_PROJECT, remote_path=PROJ_PATH +) +dbt_profiles = modal.Mount.from_local_file( + local_path=LOCAL_DBT_PROJECT / "profiles.yml", + remote_path=pathlib.Path(PROFILES_PATH, "profiles.yml"), +) +dbt_vol = modal.Volume.from_name("dbt-inference-vol", create_if_missing=True) + +# ## Run dbt in a serverless Modal Function +# +# With Modal it's easy to run Python code serverlessly, +# and with dbt's [programmatic invocations](https://docs.getdbt.com/reference/programmatic-invocations) +# you can run dbt from Python instead of using the command line. +# +# Using the above configuration, we can invoke dbt from Modal +# and use it to run transformations in our warehouse. +# The `dbt_run` function does a few things. It: +# 1. creates the directories for storing the DuckDB database and dbt target files +# 2. gets a reference to a deployed Modal Function that serves an LLM inference endpoint +# 3. runs dbt with a variable for the inference URL +# 4. prints the contents of the final dbt table by querying its parquet output with DuckDB + + +@app.function( + mounts=[dbt_project, dbt_profiles], + volumes={VOL_PATH: dbt_vol}, +) +def dbt_run() -> None: + import os + + import duckdb + from dbt.cli.main import dbtRunner + + os.makedirs(DB_PATH, exist_ok=True) + os.makedirs(TARGET_PATH, exist_ok=True) + + # Remember to either deploy this yourself in your environment + # or point this at another web endpoint you have available + ref = modal.Function.lookup( + "example-trtllm-Meta-Llama-3-8B-Instruct", "generate_web" + ) + + res = dbtRunner().invoke( + ["run", "--vars", f"{{'inference_url': '{ref.web_url}'}}"] + ) + if res.exception: + print(res.exception) + + duckdb.sql( + f"select * from '{DB_PATH}/product_reviews_sentiment_agg.parquet';" + ).show() + + +# Running the Modal Function with +# `modal run dbt_modal_inference.py` +# will result in something like: +# +# ``` +# 21:25:21 Running with dbt=1.8.4 +# 21:25:21 Registered adapter: duckdb=1.8.1 +# 21:25:23 Found 5 models, 2 seeds, 6 data tests, 2 sources, 408 macros +# 21:25:23 +# 21:25:23 Concurrency: 1 threads (target='dev') +# 21:25:23 +# 21:25:23 1 of 5 START sql table model main.stg_products ................................. [RUN] +# 21:25:23 1 of 5 OK created sql table model main.stg_products ............................ [OK in 0.22s] +# 21:25:23 2 of 5 START sql table model main.stg_reviews .................................. [RUN] +# 21:25:23 2 of 5 OK created sql table model main.stg_reviews ............................. [OK in 0.17s] +# 21:25:23 3 of 5 START sql table model main.product_reviews .............................. [RUN] +# 21:25:23 3 of 5 OK created sql table model main.product_reviews ......................... [OK in 0.17s] +# 21:25:23 4 of 5 START python external model main.product_reviews_sentiment .............. [RUN] +# 21:25:32 4 of 5 OK created python external model main.product_reviews_sentiment ......... [OK in 8.83s] +# 21:25:32 5 of 5 START sql external model main.product_reviews_sentiment_agg ............. [RUN] +# 21:25:32 5 of 5 OK created sql external model main.product_reviews_sentiment_agg ........ [OK in 0.16s] +# 21:25:32 +# 21:25:32 Finished running 3 table models, 2 external models in 0 hours 0 minutes and 9.76 seconds (9.76s). +# 21:25:33 +# 21:25:33 Completed successfully +# 21:25:33 +# 21:25:33 Done.
PASS=5 WARN=0 ERROR=0 SKIP=0 TOTAL=5 +# ┌──────────────┬──────────────────┬─────────────────┬──────────────────┐ +# │ product_name │ positive_reviews │ neutral_reviews │ negative_reviews │ +# │ varchar │ int64 │ int64 │ int64 │ +# ├──────────────┼──────────────────┼─────────────────┼──────────────────┤ +# │ Splishy │ 3 │ 0 │ 1 │ +# │ Blerp │ 3 │ 1 │ 1 │ +# │ Zinga │ 2 │ 0 │ 0 │ +# │ Jinkle │ 2 │ 1 │ 1 │ +# │ Flish │ 2 │ 2 │ 1 │ +# │ Kablooie │ 2 │ 1 │ 1 │ +# │ Wizzle │ 2 │ 1 │ 0 │ +# │ Snurfle │ 2 │ 1 │ 0 │ +# │ Glint │ 2 │ 0 │ 0 │ +# │ Flumplenook │ 2 │ 1 │ 1 │ +# │ Whirlybird │ 2 │ 0 │ 1 │ +# ├──────────────┴──────────────────┴─────────────────┴──────────────────┤ +# │ 11 rows 4 columns │ +# └──────────────────────────────────────────────────────────────────────┘ +# ``` +# +# Here we can see that the LLM classified the reviews into three different sentiment categories +# that we could then aggregate in a subsequent SQL model! +# +# ## Python dbt model +# +# The Python dbt model in [`dbt_modal_inference_proj/models/product_reviews_sentiment.py`](https://github.com/modal-labs/modal-examples/blob/main/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews_sentiment.py) is quite simple. +# +# It defines a Python dbt model that reads record batches of product reviews, +# generates a prompt for each review, and makes an inference call to a Modal Function +# that serves an LLM inference endpoint. It then stores the output in a new column +# and writes the data to a parquet file. +# +# And it's that simple to call a Modal web endpoint from dbt! +# +# ## View the stored output +# +# Since we're using a [Volume](https://modal.com/docs/guide/volumes) for storing our dbt target results +# and our DuckDB parquet files, +# you can view the results and use them outside the Modal Function too.
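+# For example, since the parquet files live in the `dbt-inference-vol` Volume, one way
+# to use the results outside the Modal Function is to download a file to your machine
+# and query it locally. Below is a minimal sketch, assuming you have pulled
+# `db/product_reviews_sentiment_agg.parquet` into your working directory (for instance
+# with `modal volume get`, if available in your Modal CLI version) and have the
+# `duckdb` Python package installed:
+#
+# ```python
+# import duckdb
+#
+# # query the aggregated sentiment counts straight from the downloaded parquet file
+# duckdb.sql(
+#     "select * from 'product_reviews_sentiment_agg.parquet' order by positive_reviews desc"
+# ).show()
+# ```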
+# +# View the target directory by: +# ```sh +# modal volume ls dbt-inference-vol target/ +# Directory listing of 'target/' in 'dbt-inference-vol' +# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━┓ +# ┃ Filename ┃ Type ┃ Created/Modified ┃ Size ┃ +# ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━┩ +# │ target/run │ dir │ 2024-07-19 22:59 CEST │ 14 B │ +# │ target/compiled │ dir │ 2024-07-19 22:59 CEST │ 14 B │ +# │ target/semantic_manifest.json │ file │ 2024-07-19 23:25 CEST │ 234 B │ +# │ target/run_results.json │ file │ 2024-07-19 23:25 CEST │ 10.1 KiB │ +# │ target/manifest.json │ file │ 2024-07-19 23:25 CEST │ 419.7 KiB │ +# │ target/partial_parse.msgpack │ file │ 2024-07-19 23:25 CEST │ 412.7 KiB │ +# │ target/graph_summary.json │ file │ 2024-07-19 23:25 CEST │ 1.4 KiB │ +# │ target/graph.gpickle │ file │ 2024-07-19 23:25 CEST │ 15.7 KiB │ +# └───────────────────────────────┴──────┴───────────────────────┴───────────┘ +# ``` +# +# And the db directory: +# ```sh +# modal volume ls dbt-inference-vol db/ +# Directory listing of 'db/' in 'dbt-inference-vol' +# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┓ +# ┃ Filename ┃ Type ┃ Created/Modified ┃ Size ┃ +# ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━┩ +# │ db/review_sentiments.parquet │ file │ 2024-07-19 23:25 CEST │ 9.6 KiB │ +# │ db/product_reviews_sentiment_agg.parquet │ file │ 2024-07-19 23:25 CEST │ 756 B │ +# └──────────────────────────────────────────┴──────┴───────────────────────┴─────────┘ +# ``` +# diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/.gitignore b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/.gitignore new file mode 100644 index 000000000..49f147cb9 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/.gitignore @@ -0,0 +1,4 @@ + +target/ +dbt_packages/ +logs/ diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/db/.gitkeep b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/db/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/dbt_project.yml b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/dbt_project.yml new file mode 100644 index 000000000..e0333f31e --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/dbt_project.yml @@ -0,0 +1,29 @@ +name: "sentiment_shop" +version: "1.0.0" +config-version: 2 + +# This setting configures which "profile" dbt uses for this project. +profile: "modal" + +# These configurations specify where dbt should look for different types of files. +# The `model-paths` config, for example, states that models in this project can be +# found in the "models/" directory. You probably won't need to change these! 
+model-paths: ["models"] +analysis-paths: ["analyses"] +test-paths: ["tests"] +seed-paths: ["seeds"] +macro-paths: ["macros"] +snapshot-paths: ["snapshots"] + +target-path: "target" # directory which will store compiled SQL files +clean-targets: # directories to be removed by `dbt clean` + - "target" + - "dbt_packages" + +# Configuring models +# Full documentation: https://docs.getdbt.com/docs/configuring-models +models: + +materialized: table + +seeds: + diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/models.yml b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/models.yml new file mode 100644 index 000000000..481005e76 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/models.yml @@ -0,0 +1,12 @@ +version: 2 + +models: + - name: product_reviews_sentiment + config: + materialized: external + location: "{{ env_var('DB_PATH') }}/product_reviews_sentiment.parquet" + inference_url: "{{ var('inference_url') }}" + - name: product_reviews_sentiment_agg + config: + materialized: external + location: "{{ env_var('DB_PATH') }}/product_reviews_sentiment_agg.parquet" diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews.sql b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews.sql new file mode 100644 index 000000000..50bd2eb2d --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews.sql @@ -0,0 +1,24 @@ +with products as ( + + select * from {{ ref('stg_products') }} + +), + +reviews as ( + + select * from {{ ref('stg_reviews') }} + +), + +product_reviews as ( + + select + p.id as product_id, + p.name as product_name, + p.description as product_description, + r.review as product_review + from products p + left join reviews r on p.id = r.product_id +) + +select * from product_reviews diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews_sentiment.py b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews_sentiment.py new file mode 100644 index 000000000..1bce2fdf9 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews_sentiment.py @@ -0,0 +1,94 @@ +import json + +import pyarrow as pa +import requests + + +def get_prompt(review): + """ + This function takes a review and returns a prompt for the review sentiment classification. + + Args: + review: A product review. + + Returns: + A prompt for the review sentiment classification. + """ + return ( + """ +You are an expert at analyzing product reviews sentiment. +Your task is to classify the given product review into one of the following labels: ["positive", "negative", "neutral"] +Here are some examples: +1. "example": "Packed with innovative features and reliable performance, this product exceeds expectations, making it a worthwhile investment." + "label": "positive" +2. "example": "Despite promising features, the product's build quality and performance were disappointing, failing to meet expectations." + "label": "negative" +3. "example": "While the product offers some useful functionalities, its overall usability and durability may vary depending on individual needs and preferences." + "label": "neutral" +Label the following review: +""" + + '"' + + review + + '"' + + """ +Respond in a single word with the label. 
+""" + ) + + +def batcher(batch_reader: pa.RecordBatchReader, inference_url: str): + """ + This function takes a batch reader and an inference url and yields a record batch with the review sentiment. + + Args: + batch_reader: A record batch reader. + inference_url: The url of the inference service. + + Yields: + A record batch with the review sentiment. + """ + for batch in batch_reader: + df = batch.to_pandas() + + prompts = ( + df["product_review"] + .apply(lambda review: get_prompt(review)) + .tolist() + ) + + res = ( + requests.post( # request to the inference service running on Modal + inference_url, + json={"prompts": prompts}, + ) + ) + + df["review_sentiment"] = json.loads(res.content) + + yield pa.RecordBatch.from_pandas(df) + + +def model(dbt, session): + """ + This function defines the model for the product reviews sentiment. + + Args: + dbt: The dbt object. + session: The session object. + + Returns: + A record batch reader with the review sentiment. + """ + dbt.config( + materialized="external", + location="/root/vol/db/review_sentiments.parquet", + ) + inference_url = dbt.config.get("inference_url") + + big_model = dbt.ref("product_reviews") + batch_reader = big_model.record_batch(100) + batch_iter = batcher(batch_reader, inference_url) + new_schema = batch_reader.schema.append( + pa.field("review_sentiment", pa.string()) + ) + return pa.RecordBatchReader.from_batches(new_schema, batch_iter) diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews_sentiment_agg.sql b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews_sentiment_agg.sql new file mode 100644 index 000000000..38bd86946 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/product_reviews_sentiment_agg.sql @@ -0,0 +1,39 @@ +with product_reviews_sentiment as ( + + select + product_id, + product_name, + product_description, + product_review, + review_sentiment, + from {{ ref('product_reviews_sentiment') }} +), + +clean as ( + + select + product_id, + product_name, + product_description, + product_review, + case when regexp_matches(review_sentiment, 'positive', 'i') then 'positive' else null end AS positive_reviews, + case when regexp_matches(review_sentiment, 'neutral', 'i') then 'neutral' else null end AS neutral_reviews, + case when regexp_matches(review_sentiment, 'negative', 'i') then 'negative' else null end AS negative_reviews + from product_reviews_sentiment + +), + +aggregated as ( + + select + product_name, + count(positive_reviews) as positive_reviews, + count(neutral_reviews) as neutral_reviews, + count(negative_reviews) as negative_reviews + from clean + group by 1 + order by 2 desc + +) + +select * from aggregated \ No newline at end of file diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/sources.yml b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/sources.yml new file mode 100644 index 000000000..4803fd041 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/sources.yml @@ -0,0 +1,9 @@ +version: 2 + +sources: + - name: external_source + meta: + external_location: "{{ env_var('DBT_PROJECT_DIR') }}/seeds/{name}.csv" + tables: + - name: raw_reviews + - name: raw_products diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/schema.yml b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/schema.yml new file mode 100644 index 000000000..1edd0baa5 --- 
/dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/schema.yml @@ -0,0 +1,23 @@ +version: 2 + +models: + - name: stg_products + columns: + - name: id + tests: + - not_null + - unique + - name: name + tests: + - not_null + - name: description + tests: + - not_null + - name: stg_reviews + columns: + - name: product_id + tests: + - not_null + - name: review + tests: + - not_null diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/stg_products.sql b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/stg_products.sql new file mode 100644 index 000000000..534a7a8d9 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/stg_products.sql @@ -0,0 +1,7 @@ +with source as ( + + select * from {{ source('external_source', 'raw_products') }} + +) + +select * from source diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/stg_reviews.sql b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/stg_reviews.sql new file mode 100644 index 000000000..421890d04 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/models/staging/stg_reviews.sql @@ -0,0 +1,7 @@ +with source as ( + + select * from {{ source('external_source', 'raw_reviews') }} + +) + +select * from source diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/profiles.yml b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/profiles.yml new file mode 100644 index 000000000..9479d1087 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/profiles.yml @@ -0,0 +1,5 @@ +modal: + outputs: + dev: + type: duckdb + target: dev diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/.gitkeep b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/raw_products.csv b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/raw_products.csv new file mode 100644 index 000000000..ca656ba13 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/raw_products.csv @@ -0,0 +1,12 @@ +id,name,description +11385242,Flumplenook,A revolutionary new kitchen gadget that makes cooking easier +19246147,Snurfle,A line of scented candles with unique fragrances inspired by the great outdoors +91080316,Zinga,A high-tech smartwatch that tracks your daily activities and provides personalized recommendations for improvement +46814909,Flish,A waterproof Bluetooth speaker designed for use in the shower or pool +76407676,Jinkle,A line of eco-friendly cleaning products made from natural ingredients +17532321,Wizzle,"A portable, handheld device that converts any surface into a mini trampoline" +26171214,Kablooie,A subscription service that delivers a monthly selection of artisanal cheeses to your doorstep +74232095,Splishy,A water-resistant phone case designed for use in extreme weather conditions +90587663,Glint,"A line of sparkly, edible decorations for cakes and cupcakes" +65134363,Blerp,A social media platform specifically designed for cat owners to share photos and stories about their feline friends +34290419,Whirlybird,A toy helicopter that can be controlled using hand gestures diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/raw_reviews.csv 
b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/raw_reviews.csv new file mode 100644 index 000000000..c75dcfaa2 --- /dev/null +++ b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/seeds/raw_reviews.csv @@ -0,0 +1,30 @@ +product_id,review +11385242,"I've had some issues with jamming, but overall Flumplenook is a great addition to my kitchen. The suction cup on the bottom can be a bit finicky, but once you get the hang of it, it's a game-changer. Love the compact design and the fact that it's dishwasher safe." +11385242,"Unfortunately, I found Flumplenook to be a bit of a learning curve. The instructions were unclear and I ended up with more mess than I started with. However, once I figured it out, it does make cooking faster and more efficient. Just wish they'd include a recipe book or online tutorials to help with getting started." +19246147,"I'm obsessed with Snurfle's new 'Moonlit Meadow' scent! It captures the essence of a warm summer evening, transporting me to a peaceful forest glade every time I light it. The fragrance is subtle yet alluring, making it perfect for a relaxing bath or meditation session." +19246147,"I was wasn't sure about trying a candle with an outdoor-inspired scent, but Snurfle's 'Wildflower Woods' really surprised me! The blend of floral and earthy notes is so unique and refreshing. It's now my go-to candle for springtime vibes in my living room" +19246147,"While I appreciate the creativity behind Snurfle's fragrances, I found the 'Rainforest Rain' scent to be a bit too overpowering for my taste. It's definitely a strong and bold fragrance, but not quite what I expected from a more delicate candle like this. Maybe try a smaller size first before committing to a larger one?" +91080316,"I've been using Zinga for a week now, and I'm obsessed! The watch is sleek and comfortable to wear. The activity tracking feature is so accurate, it even caught me sleeping in 10 minutes longer than I thought I was! The personalized recommendations have helped me stay on track with my goals and I feel more focused throughout the day." +91080316,"I got Zinga as a gift for my husband and he loves it! He's not super tech-savvy, but he found the setup process easy and the features are pretty cool. However, the battery life could be better. We'll see how long it lasts after a full charge, but so far so good!" +46814909,"I was skeptical about this speaker at first, but it's actually really great! The sound quality is surprisingly good and it's so easy to pair with my phone. I've used it in the shower and by the pool and it's been a game-changer. Highly recommend!" +46814909,"This speaker is super convenient for the beach or pool. It's small enough to fit in my bag and the sound is decent. However, it does tend to get a bit distorted when submerged underwater. Still a solid choice for a fun summer accessory." +46814909,"I was expecting more from this speaker, especially considering the price. The sound quality is okay, but it's not as loud as I thought it would be. Also, the battery life could be better. That being said, it's still a cute little thing that's easy to use and looks cool in the water. Maybe worth it if you're looking for something basic." +76407676,"I was skeptical about switching to an eco-friendly cleaner, but Jinkle's gentle formula really won me over! My home has never been cleaner and my skin doesn't react badly to it. Plus, the scents are amazing!" +76407676,"I've tried a lot of natural cleaners before, but Jinkle is one of the best. 
The plant-based ingredients make me feel good about using them in my home. Only wish they came in bigger sizes!" +76407676,"I'm not usually a fan of strong-smelling cleaners, but Jinkle's subtle scent is actually quite pleasant. However, I do wish they had more variety in their product line. Still, it's nice to know I'm doing something good for the planet." +17532321,"I was skeptical at first, but the Wizzle really delivers! I used it on my bed and it's so much fun. It's like having a mini trampoline in your bedroom! The best part is that it's super easy to set up and take down." +17532321,"The Wizzle is a great way to get some exercise while watching TV or playing video games. It's small enough to fit under my desk, so I can bounce away while I'm working from home. Just be careful not to bounce too high or you might knock things over!" +17532321,"I was expecting more out of the Wizzle, unfortunately. While it's fun to use, it's not as sturdy as I thought it would be. I accidentally dropped it once and it got a little bent. Still, it's a good way to get some low-impact exercise in, especially if you're just starting out with fitness." +26171214,I'm obsessed with Kablooie! The variety is amazing and the quality is top-notch. My favorite so far has been the truffle gouda. Can't wait for next month's delivery! +26171214,"Kablooie is okay, I guess. The cheeses are nice, but sometimes they're not what I expected. Still, it's a fun surprise every month and I like trying new things. Maybe just needs some more variety in their selection." +74232095,"I was skeptical about this case at first, but it's been through a few rough days with me and my phone is still dry as a bone! The Splishy case has kept it safe from water, mud, and even a little bit of sand. Highly recommend!" +74232095,"I've used this case on hikes and camping trips and it's been great so far. It's definitely not perfect, but it's done its job in keeping my phone protected from the elements. Just wish it came with a screen protector too..." +74232095,"I bought this case thinking it would be more durable than it is. After a few drops of water got inside, I had to dry it out with a towel. Not impressed. Maybe it's okay for casual use, but if you're looking for something super rugged, keep looking." +90587663,Glints are so much fun to work with! I used them on a cake for my friend's birthday and it looked amazing. The only thing is that they can be a bit messy to apply. +90587663,I've never seen anything like Glint before! These edible decorations are so sparkly and delicious. I used them on a vanilla cupcake and it tasted like a sweet little piece of heaven. Highly recommend! +65134363,"I'm obsessed with Blerp! As a cat mom, I love sharing adorable pics of my kitty, Luna, and connecting with other cat lovers. The community is so supportive and fun!" +65134363,"Blerp is purr-fectly wonderful! I was skeptical at first, but now I'm hooked on seeing all the cute cat faces and reading funny stories from fellow felines. My cat, Mr. Bigglesworth, even has his own fan club" +65134363,"As a busy cat owner, I appreciate the ease of use on Blerp. However, sometimes the app can be slow to load and I wish there were more features for sharing videos. Still, it's a great way to connect with other cat enthusiasts" +34290419,"I was skeptical at first, but my kids loved this thing! The Whirlybird is so much fun to play with. It's easy to use and the designs on the blades are really cool. 
My only complaint is that it's a bit loud when it gets going, but overall a great toy!" +34290419,"The Whirlybird is a unique gift idea for my nephew's birthday party. He loves playing with it and it's a great way to get him moving around. The battery life could be better, but overall a good value for the price." +34290419,"I thought I'd love the Whirlybird, but unfortunately it's not as durable as I expected. The plastic feels cheap and the motor is already starting to slow down after just a few uses. Maybe it's just a one-time fluke, but I'm not sure if I would recommend it." diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/snapshots/.gitkeep b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/snapshots/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/tests/.gitkeep b/10_integrations/dbt_modal_inference/dbt_modal_inference_proj/tests/.gitkeep new file mode 100644 index 000000000..e69de29bb