1 parent c1d61a3 commit fb47ba9
tests/unit/experience/test_rollouts.py
@@ -221,6 +221,7 @@ def initial_multi_step_calculator_batch(rollout_tokenizer):
         "stop_token_ids": None,
         "stop_strings": None,
         "vllm_cfg": {
+            "async_engine": False,
             "precision": "bfloat16",
             "tensor_parallel_size": 1,
             "max_model_len": 2048,
tests/unit/models/generation/test_vllm_generation.py
@@ -350,6 +350,9 @@ async def test_vllm_policy_generation_async(
     hf_policy.shutdown()


+@pytest.mark.skip(
+    reason="Skipping for now, will be fixed in https://github.com/NVIDIA/NeMo-RL/issues/408"
+)
 def test_vllm_worker_seed_behavior(cluster, tokenizer):
     """
     1. Different workers generate different outputs for identical prompts due to different seeds
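The skipped test's docstring states that differently seeded workers should produce different outputs for identical prompts. A minimal, self-contained Python sketch of that idea (the real test exercises vLLM workers; sample_token_ids is a hypothetical stand-in used only for illustration):

    import random

    def sample_token_ids(seed: int, length: int = 5) -> list[int]:
        # Hypothetical stand-in for a generation worker: same "prompt", different seed.
        rng = random.Random(seed)
        return [rng.randrange(32_000) for _ in range(length)]

    # Differently seeded workers diverge on identical inputs ...
    assert sample_token_ids(seed=0) != sample_token_ids(seed=1)
    # ... while reusing the same seed reproduces the same output.
    assert sample_token_ids(seed=0) == sample_token_ids(seed=0)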