diff --git a/src/turbomind/triton_backend/llama/LlamaTritonModel.cc b/src/turbomind/triton_backend/llama/LlamaTritonModel.cc
index 5fbd4287a8..8db13652f5 100644
--- a/src/turbomind/triton_backend/llama/LlamaTritonModel.cc
+++ b/src/turbomind/triton_backend/llama/LlamaTritonModel.cc
@@ -336,12 +336,6 @@ LlamaTritonModel<T>::LlamaTritonModel(size_t tensor_para_size,
     }
     else {
         moe_param_.method = ft::MoeParam::kFused;
-        // Note: This will fail when GPUs of different SMs are mixed
-        if (weight_type_ != ft::WeightType::kINT4 && ft::getSMVersion() >= 90) {
-            // On sm90 the cuBLAS method may be faster as our grouped GEMM is not
-            // optimized for GMMA yet
-            moe_param_.method = ft::MoeParam::kNaive;
-        }
     }
 
     TM_LOG_INFO("%s", toString().c_str());
@@ -380,6 +374,10 @@ std::unique_ptr<ft::Engine<T>> LlamaTritonModel<T>::createSharedModelInstance(
                                                   shared_state_,
                                                   device_id);
 
+    // Wait for pinned buffers to be allocated for all ranks, otherwise tuning will hang
+    // due to concurrent kernel launch & cudaMallocHost
+    shared_state_->barrier->wait();
+
     engine->Start();
 
     return engine;
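
For context, here is a minimal standalone sketch of the ordering the second hunk enforces: every rank must finish its cudaMallocHost calls before any rank starts launching kernels, which is the race the new barrier->wait() closes. This is not the TurboMind code; tuning_kernel, the buffer size, and the use of std::barrier in place of shared_state_->barrier are all invented for illustration.

// Sketch only (assumed names, not the TurboMind implementation).
// Illustrates the allocate -> barrier -> launch ordering added by the patch.
// Compile with e.g. nvcc -std=c++20 sketch.cu
#include <cuda_runtime.h>
#include <barrier>
#include <thread>
#include <vector>

__global__ void tuning_kernel() {}  // stand-in for the GEMM tuning kernels

int main() {
    int n_ranks = 0;
    cudaGetDeviceCount(&n_ranks);
    if (n_ranks == 0) {
        return 0;
    }
    std::barrier sync(n_ranks);  // plays the role of shared_state_->barrier

    std::vector<std::thread> ranks;
    for (int device_id = 0; device_id < n_ranks; ++device_id) {
        ranks.emplace_back([&, device_id] {
            cudaSetDevice(device_id);

            // 1. Allocate pinned (page-locked) host buffers, as engine
            //    construction does. Size is arbitrary for the sketch.
            void* pinned = nullptr;
            cudaMallocHost(&pinned, 1 << 20);

            // 2. Wait until *every* rank has finished cudaMallocHost. Without
            //    this, one rank may already be launching tuning kernels while
            //    another is still inside cudaMallocHost -- the concurrent
            //    kernel launch & cudaMallocHost mix the patch comment blames
            //    for the hang.
            sync.arrive_and_wait();

            // 3. Only now start tuning (engine->Start() in the patch).
            tuning_kernel<<<1, 1>>>();
            cudaDeviceSynchronize();
            cudaFreeHost(pinned);
        });
    }
    for (auto& t : ranks) {
        t.join();
    }
}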