diff --git a/python/hsfs/constructor/query.py b/python/hsfs/constructor/query.py
index 0270c20f4e..cace781835 100644
--- a/python/hsfs/constructor/query.py
+++ b/python/hsfs/constructor/query.py
@@ -260,6 +260,9 @@ def as_of(
     ):
         """Perform time travel on the given Query.
 
+        !!! warning "Pyspark/Spark Only"
+            Apache HUDI exclusively supports Time Travel and Incremental Query via Spark Context
+
         This method returns a new Query object at the specified point in time. Optionally, commits before
         a specified point in time can be excluded from the query. The Query can then either be read into
         a Dataframe or used further to perform joins or construct a training dataset.
diff --git a/python/hsfs/feature_group.py b/python/hsfs/feature_group.py
index 63e960469a..898e655fac 100644
--- a/python/hsfs/feature_group.py
+++ b/python/hsfs/feature_group.py
@@ -2110,7 +2110,11 @@ def read_changes(
             `as_of(end_wallclock_time, exclude_until=start_wallclock_time).read(read_options=read_options)`
             instead.
 
-        This function only works on feature groups with `HUDI` time travel format.
+        !!! warning "Pyspark/Spark Only"
+            Apache HUDI exclusively supports Time Travel and Incremental Query via Spark Context
+
+        !!! warning
+            This function only works for feature groups with time_travel_format='HUDI'.
 
         # Arguments
             start_wallclock_time: Start time of the time travel query. Strings should be formatted in one of the following formats `%Y-%m-%d`, `%Y-%m-%d %H`, `%Y-%m-%d %H:%M`,
@@ -2847,6 +2851,9 @@ def as_of(
     ):
         """Get Query object to retrieve all features of the group at a point in the past.
 
+        !!! warning "Pyspark/Spark Only"
+            Apache HUDI exclusively supports Time Travel and Incremental Query via Spark Context
+
         This method selects all features in the feature group and returns a Query object
         at the specified point in time. Optionally, commits before a specified point in time can
         be excluded from the query. The Query can then either be read into a Dataframe