diff --git a/candle-nn/src/ops.rs b/candle-nn/src/ops.rs
index beb771aaf9..9a360c472c 100644
--- a/candle-nn/src/ops.rs
+++ b/candle-nn/src/ops.rs
@@ -927,33 +927,6 @@ pub fn replication_pad2d(xs: &Tensor, pad: usize) -> Result<Tensor> {
     }
 }
 
-#[cfg(feature = "cuda")]
-pub fn kvconcat(ltensor: &Tensor, rtensor: &Tensor, concat_dim: usize) -> Result<Tensor> {
-    if !ltensor.device().is_cuda() {
-        return Tensor::cat(&[ltensor, &rtensor], concat_dim as usize)?.contiguous();
-    }
-    use candle::cuda_backend::KVConcat;
-    let op = KVConcat { concat_dim };
-    //inputs for kvconcat must be contiguous tensors
-    if ltensor.is_contiguous() && rtensor.is_contiguous() {
-        ltensor.apply_op2(&rtensor, op)
-    } else if ltensor.is_contiguous() {
-        ltensor.apply_op2(&rtensor.contiguous()?, op)
-    } else if rtensor.is_contiguous() {
-        let ltensor = ltensor.contiguous()?;
-        ltensor.apply_op2(&rtensor, op)
-    } else {
-        let ltensor = ltensor.contiguous()?;
-        let rtensor = rtensor.contiguous()?;
-        ltensor.apply_op2(&rtensor, op)
-    }
-}
-
-#[cfg(not(feature = "cuda"))]
-pub fn kvconcat(ltensor: &Tensor, rtensor: &Tensor, concat_dim: i32) -> Result<Tensor> {
-    Tensor::cat(&[ltensor, rtensor], concat_dim as usize)?.contiguous()
-}
-
 #[derive(Clone, Debug)]
 pub struct Identity;
 
diff --git a/candle-transformers/src/models/vgg.rs b/candle-transformers/src/models/vgg.rs
index 7c8dad510e..413df68857 100644
--- a/candle-transformers/src/models/vgg.rs
+++ b/candle-transformers/src/models/vgg.rs
@@ -54,10 +54,10 @@ impl ModuleT for Vgg<'_> {
 fn conv2d_block(convs: &[(usize, usize, &str)], vb: &VarBuilder) -> Result<FuncT<'static>> {
     let layers = convs
         .iter()
-        .map(|(in_c, out_c, name)| {
+        .map(|&(in_c, out_c, name)| {
             candle_nn::conv2d(
-                *in_c,
-                *out_c,
+                in_c,
+                out_c,
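
Migration note: the non-CUDA fallback deleted above was already a plain Tensor::cat followed by .contiguous(), so downstream code that still calls candle_nn::ops::kvconcat can switch to that directly. A minimal sketch of the replacement (the helper name and the dimension choice below are illustrative, not part of candle's API):

    use candle::{Result, Tensor};

    /// Concatenates the cached and the freshly computed K/V tensors along
    /// `concat_dim`, mirroring the deleted helper: Tensor::cat performs the
    /// concatenation on any backend, and the trailing .contiguous() keeps
    /// the old guarantee that the returned cache tensor is contiguous.
    fn kvconcat(ltensor: &Tensor, rtensor: &Tensor, concat_dim: usize) -> Result<Tensor> {
        Tensor::cat(&[ltensor, rtensor], concat_dim)?.contiguous()
    }

For the common [batch, heads, seq, head_dim] cache layout this would be called with concat_dim = 2, i.e. along the sequence axis.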