Skip to content

Commit

Permalink
refactor(Jetstream PT): drop HfEngine
Browse files Browse the repository at this point in the history
Now that sampling can be passed as a parameter to prefill and generate,
the custom engine is no longer required.
  • Loading branch information
tengomucho committed Sep 9, 2024
1 parent 7eacf8a commit 1676b67
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 22 deletions.

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

import jax
from jetstream_pt import fetch_models, torchjax
from jetstream_pt.engine import PyTorchEngine
from jetstream_pt.environment import (
JetEngineEnvironment,
JetEngineEnvironmentData,
Expand All @@ -13,7 +14,6 @@
from loguru import logger
from transformers import AutoConfig, PretrainedConfig

from .engine import HfEngine
from .llama_model_exportable_hf import TransformerHf


Expand Down Expand Up @@ -131,7 +131,7 @@ def create_engine(
sequence_length: int,
max_input_tokens: int,
max_output_tokens: int,
) -> HfEngine:
) -> PyTorchEngine:
# NOTE: for now no quantization is done
env_data = create_engine_env_data(model_path, batch_size, sequence_length, max_input_tokens, max_output_tokens)
if env_data is None:
Expand All @@ -142,7 +142,7 @@ def create_engine(
weight_shardings = model.get_sharding_annotations()
sharded_weights = shard_weights(env, model.state_dict(), weight_shardings)

return HfEngine(
return PyTorchEngine(
pt_model=model,
env=env,
weights=torchjax.from_torch_with_copy(sharded_weights),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import torch
import torch_xla2
from jetstream.engine.token_utils import pad_tokens, take_nearest_length
from jetstream_pt.engine import PyTorchEngine
from loguru import logger
from transformers import AutoTokenizer, PreTrainedTokenizerBase
from transformers.generation import GenerationConfig
Expand All @@ -27,7 +28,6 @@
StoppingCriteriaParameters,
Tokens,
)
from .engine import HfEngine
from .engine_loader import create_engine
from .token_selector import TokenSelector

Expand Down Expand Up @@ -243,7 +243,7 @@ class TpuGeneratorJetStream(Generator):

def __init__(
self,
engine: HfEngine,
engine: PyTorchEngine,
tokenizer: PreTrainedTokenizerBase,
):
self.engine = engine
Expand Down

0 comments on commit 1676b67

Please sign in to comment.