Commit b683f87

yapf reformat vllm/core and vllm/block.py
jberkhahn committed Aug 23, 2024
1 parent a0a6b07 commit b683f87
Showing 3 changed files with 5 additions and 2 deletions.
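
The changes below are purely mechanical: yapf wraps lines that exceed the column limit and adjusts blank lines, without changing behavior. As a rough illustration, a rewrite like this can be reproduced with yapf's Python API; the pep8-based, 80-column style string in the sketch is an assumption, since the repository's actual yapf configuration is not shown on this page.

# Minimal sketch: running yapf over the over-long line touched in
# vllm/core/scheduler.py. The style config here is an assumption.
from yapf.yapflib.yapf_api import FormatCode

source = (
    "def scheduled_seq_group_builder():\n"
    "    return ScheduledSequenceGroup(SequenceGroup('', [], -1), "
    "token_chunk_size=0)\n")

formatted, changed = FormatCode(
    source, style_config="{based_on_style: pep8, column_limit: 80}")

# `changed` is True when yapf rewrites the input; `formatted` holds the result,
# with the call arguments split across lines much as in the diff below.
print(formatted)
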
1 change: 1 addition & 0 deletions vllm/block.py
@@ -60,6 +60,7 @@ def __getitem__(self, key):
         return self._blocks[key]
 
     if "TYPE_CHECKING":
+
         def __iter__(self) -> Iterator[PhysicalTokenBlock]:
             raise RuntimeError("Method should be automatically generated")

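The guarded `__iter__` in the hunk above reads like a type-checker-facing stub: its body only raises "Method should be automatically generated". For reference, a common way to gate such stubs is the standard `typing.TYPE_CHECKING` flag; the sketch below uses that idiom with illustrative names (`IntList`, `Iterator[int]`), not vllm's own classes.

from typing import TYPE_CHECKING, Iterator, List


class IntList:
    """Thin wrapper around a list of ints (illustrative only)."""

    def __init__(self, items: List[int]) -> None:
        self._items = items

    def __getitem__(self, key):
        return self._items[key]

    if TYPE_CHECKING:
        # Seen by static type checkers only; never defined at runtime.
        def __iter__(self) -> Iterator[int]:
            raise RuntimeError("Method should be automatically generated")

Under `typing.TYPE_CHECKING` the stub is invisible at runtime, and iterating an `IntList` still works through `__getitem__` via the legacy sequence protocol.
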
3 changes: 2 additions & 1 deletion vllm/core/block_manager_v1.py
@@ -321,7 +321,8 @@ def _allocate_sequence(self, \
         for logical_idx in range(num_prompt_blocks):
             if (self.block_sliding_window is not None
                     and logical_idx >= self.block_sliding_window):
-                block = block_table[logical_idx % self.block_sliding_window]
+                block = block_table[logical_idx %
+                                    self.block_sliding_window]
                 # Set the reference counts of the token blocks.
                 block.ref_count = ref_count
             elif not is_encoder_decoder and self.enable_caching:
3 changes: 2 additions & 1 deletion vllm/core/scheduler.py
@@ -288,7 +288,8 @@ def scheduler_running_outputs_builder():
 
 
 def scheduled_seq_group_builder():
-    return ScheduledSequenceGroup(SequenceGroup("", [], -1), token_chunk_size=0)
+    return ScheduledSequenceGroup(SequenceGroup("", [], -1),
+                                  token_chunk_size=0)
     # return ScheduledSequenceGroup(seq_group=None, token_chunk_size=0)


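The reformatted `scheduled_seq_group_builder` above is a zero-argument factory that returns a placeholder `ScheduledSequenceGroup` built from dummy values. The sketch below shows one generic way such builder callables are often consumed, via a small object pool; `SimplePool` is a hypothetical illustration, not vllm's actual caching helper.

from typing import Callable, Generic, List, TypeVar

T = TypeVar("T")


class SimplePool(Generic[T]):
    """Toy object pool: hands out recycled objects, building new ones on demand."""

    def __init__(self, builder: Callable[[], T]) -> None:
        # `builder` is a zero-argument factory, e.g. scheduled_seq_group_builder.
        self._builder = builder
        self._free: List[T] = []

    def get(self) -> T:
        # Reuse a previously returned object if one is available.
        return self._free.pop() if self._free else self._builder()

    def put(self, obj: T) -> None:
        self._free.append(obj)

A consumer would call `pool.get()`, overwrite the placeholder fields (such as `token_chunk_size`), and hand the object back with `pool.put()` when finished; that reuse pattern is presumably why the builder can construct the object with dummy arguments.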

0 comments on commit b683f87
