Skip to content

Commit 433c4a4

Browse files
authored
Make vllm compatible with verl (vllm-project#12824)
Co-authored-by: zhangshulai <[email protected]>
1 parent ef533d2 commit 433c4a4

File tree

2 files changed

+1
-8
lines changed

2 files changed

+1
-8
lines changed

vllm/distributed/parallel_state.py

-7
Original file line numberDiff line numberDiff line change
@@ -1024,13 +1024,6 @@ def initialize_model_parallel(
10241024
backend = backend or torch.distributed.get_backend(
10251025
get_world_group().device_group)
10261026

1027-
if (world_size
1028-
!= tensor_model_parallel_size * pipeline_model_parallel_size):
1029-
raise RuntimeError(
1030-
f"world_size ({world_size}) is not equal to "
1031-
f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
1032-
f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")
1033-
10341027
# Build the tensor model-parallel groups.
10351028
num_tensor_model_parallel_groups: int = (world_size //
10361029
tensor_model_parallel_size)

vllm/executor/uniproc_executor.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ def _init_executor(self) -> None:
101101
# - MASTER_PORT
102102
distributed_init_method = "env://"
103103
rank = int(os.environ["RANK"])
104-
local_rank = rank
104+
local_rank = int(os.environ["LOCAL_RANK"])
105105
is_driver_worker = True
106106
kwargs = dict(
107107
vllm_config=self.vllm_config,

0 commit comments

Comments (0)