Update to use kernels from vllm-project#7651
tlrmchlsmth committed Aug 29, 2024
1 parent 06b146e commit 8e16aca
Showing 1 changed file with 5 additions and 4 deletions.
vllm/model_executor/models/mamba.py (5 additions & 4 deletions)
@@ -4,9 +4,6 @@
 from typing import Iterable, List, Optional, Tuple
 
 import torch
-from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
-from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
-from mamba_ssm.ops.triton.selective_state_update import selective_state_update
 from torch import nn
 from torch.nn.parameter import Parameter
 from transformers import MambaConfig
@@ -21,6 +18,10 @@
                                                MergedColumnParallelLinear,
                                                RowParallelLinear)
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
+from vllm.model_executor.layers.mamba.ops.causal_conv1d import (
+    causal_conv1d_fn, causal_conv1d_update)
+from vllm.model_executor.layers.mamba.ops.mamba_ssm import (
+    selective_scan_fn, selective_state_update)
 from vllm.model_executor.layers.quantization.base_config import (
     QuantizationConfig)
 from vllm.model_executor.layers.sampler import Sampler
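
Taken together, the two import hunks remove the dependency on the external causal-conv1d and mamba-ssm packages: the same four functions are now imported from kernels shipped in-tree under vllm.model_executor.layers.mamba.ops.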
@@ -157,7 +158,7 @@ def mamba_forward(self,
                     (self.conv_kernel_size - hidden_states.shape[-1], 0))
                 cache_params.conv_state.copy_(conv_states)
 
-            hidden_states = causal_conv1d_fn(
+            hidden_states, _ = causal_conv1d_fn(
                 hidden_states,
                 conv_weights,
                 self.conv1d.bias,
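
The one-line change in the last hunk reflects a return-type difference: the external causal_conv1d package's causal_conv1d_fn returned a single tensor, while the in-tree kernel from vllm-project#7651 evidently returns a tuple, so the call site unpacks it and discards the second value. Below is a minimal pure-PyTorch sketch of the operation and the new calling convention; causal_conv1d_reference is a hypothetical stand-in written for illustration, not the in-tree kernel, and treating the second return value as the cached conv state is an assumption based on the conv_state padding visible in the hunk above.

import torch
import torch.nn.functional as F

def causal_conv1d_reference(x, weight, bias):
    """Hypothetical stand-in for the in-tree kernel, for illustration only.

    x: (batch, dim, seqlen); weight: (dim, kernel_size); bias: (dim,).
    Returns (out, final_states) to mirror the tuple that the new call
    site unpacks as `hidden_states, _ = causal_conv1d_fn(...)`.
    """
    dim, kernel_size = weight.shape
    # Left-pad so each output position sees only current and past inputs;
    # this is what makes the depthwise conv "causal".
    x_padded = F.pad(x, (kernel_size - 1, 0))
    out = F.conv1d(x_padded, weight.unsqueeze(1), bias, groups=dim)
    # Assumption: the second return value is the trailing input window a
    # streaming decoder would cache, matching the conv_state padding in
    # the hunk above (width = conv kernel size; assumes seqlen >= kernel_size).
    final_states = x[..., -kernel_size:]
    # Mamba applies a SiLU activation after the conv; folding it in here
    # is an assumption about what the fused kernel computes.
    return F.silu(out), final_states

x = torch.randn(2, 8, 16)   # (batch, dim, seqlen)
w = torch.randn(8, 4)       # (dim, kernel_size)
b = torch.zeros(8)
hidden_states, _ = causal_conv1d_reference(x, w, b)  # new unpacking style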
