onnx: fix pad, unsqueeze (huggingface#2317)
* onnx: fix pad, unsqueeze

Both implementations have off-by-one errors:
- The Pad 'reflect' cycle for e.g. `dim==3` is `[0,1,2,1]`, which has
  length 4 (i.e. `dim*2 - 2`), not 5 (the current code uses `dim*2 - 1`).
- Unsqueeze(-1) on a tensor with `dim==3` should insert at axis 3
  (i.e. `dim+index+1`), not 2 (currently `dim+index`).

In addition, Pad calculates the starting padding incorrectly.
If we want to pad 2 elements at the start and the cycle of indices has
length 6, we should skip 4 elements, but currently we skip 2. A more
visual representation of what is going on is below:

```
pad_start: 2
data:      [a,b,c,d]
indices:   [0, 1, 2, 3, 2, 1, 0, 1, 2, 3, 2, 1, 0, ..] // zigzag between 0..4
actual:    skip [ c  d| c  b  a  b]
expected:  ~  skip  ~ [ c  b| a  b  c  d]
```

The values between `[` and `|` are padding and the values between
`|` and `]` in the example should match the original data being padded.
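
To make the fix concrete, here is a minimal standalone sketch of the corrected index computation. The `reflect_indices` helper and `main` below are illustrative only, not part of the patch; the actual change in `candle-onnx/src/eval.rs` applies the same `cycle_len`/`skip` arithmetic inline:

```rust
/// Indices that fill one axis of length `dim` with `pad_start` leading and
/// `pad_end` trailing elements in 'reflect' mode. This is a standalone sketch
/// mirroring the corrected candle-onnx logic, not the patched code itself.
fn reflect_indices(dim: usize, pad_start: usize, pad_end: usize) -> Vec<usize> {
    // 0, 1, ..., max, max-1, ..., 1, 0, 1, ... repeated forever.
    fn zigzag(min: i64, max: i64) -> impl Iterator<Item = i64> {
        std::iter::repeat((min..max).chain((min + 1..=max).rev())).flatten()
    }
    assert!(dim > 1, "sketch only covers the dim > 1 case");
    // Fixed: the cycle [0, 1, .., dim-1, .., 1] has dim*2 - 2 entries.
    let cycle_len = dim * 2 - 2;
    // Fixed: skip so that the first in-bounds element (index 0) lands right
    // after the `pad_start` padding elements.
    let skip = cycle_len - (pad_start % cycle_len);
    zigzag(0, (dim - 1) as i64)
        .skip(skip)
        .take(pad_start + dim + pad_end)
        .map(|i| i as usize)
        .collect()
}

fn main() {
    // data [a, b, c, d], pad_start = 2: indices [2, 1, 0, 1, 2, 3] => [c, b, a, b, c, d]
    assert_eq!(reflect_indices(4, 2, 0), vec![2, 1, 0, 1, 2, 3]);
}
```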

* Fix clippy lints.

---------

Co-authored-by: Laurent <[email protected]>
shua and LaurentMazare authored Jul 23, 2024
1 parent ebc9aa6 commit 6056fd5
Showing 19 changed files with 93 additions and 26 deletions.
2 changes: 1 addition & 1 deletion candle-examples/examples/beit/main.rs
@@ -16,7 +16,7 @@ use candle_transformers::models::beit;
/// Loads an image from disk using the image crate, this returns a tensor with shape
/// (3, 384, 384). Beit special normalization is applied.
pub fn load_image384_beit_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
- let img = image::io::Reader::open(p)?
+ let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(384, 384, image::imageops::FilterType::Triangle);
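
The remaining example and wasm changes in this commit are all the same one-line rename: newer releases of the image crate deprecate `image::io::Reader` in favour of `ImageReader`, re-exported at the crate root. As a rough sketch of the new call path (assuming image 0.25 or later; `decode_image` is a hypothetical helper, not code from this commit):

```rust
use candle::Result;

// Hypothetical helper mirroring the pattern used across the examples:
// open a file with the renamed reader type and wrap decode errors as
// candle errors, exactly as the updated examples do.
fn decode_image<P: AsRef<std::path::Path>>(p: P) -> Result<image::DynamicImage> {
    image::ImageReader::open(p)? // previously image::io::Reader::open(p)?
        .decode()
        .map_err(candle::Error::wrap)
}
```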
2 changes: 1 addition & 1 deletion candle-examples/examples/blip/main.rs
@@ -55,7 +55,7 @@ const SEP_TOKEN_ID: u32 = 102;
/// Loads an image from disk using the image crate, this returns a tensor with shape
/// (3, 384, 384). OpenAI normalization is applied.
pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
- let img = image::io::Reader::open(p)?
+ let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(384, 384, image::imageops::FilterType::Triangle);
2 changes: 1 addition & 1 deletion candle-examples/examples/clip/main.rs
@@ -33,7 +33,7 @@ struct Args {
}

fn load_image<T: AsRef<std::path::Path>>(path: T, image_size: usize) -> anyhow::Result<Tensor> {
- let img = image::io::Reader::open(path)?.decode()?;
+ let img = image::ImageReader::open(path)?.decode()?;
let (height, width) = (image_size, image_size);
let img = img.resize_to_fill(
width as u32,
2 changes: 1 addition & 1 deletion candle-examples/examples/eva2/main.rs
@@ -16,7 +16,7 @@ use candle_transformers::models::eva2;
/// Loads an image from disk using the image crate, this returns a tensor with shape
/// (3, 448, 448). OpenAI normalization is applied.
pub fn load_image448_openai_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
- let img = image::io::Reader::open(p)?
+ let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(448, 448, image::imageops::FilterType::Triangle);
2 changes: 1 addition & 1 deletion candle-examples/examples/llava/main.rs
@@ -57,7 +57,7 @@ fn load_image<T: AsRef<std::path::Path>>(
llava_config: &LLaVAConfig,
dtype: DType,
) -> Result<((u32, u32), Tensor)> {
- let img = image::io::Reader::open(path)?.decode()?;
+ let img = image::ImageReader::open(path)?.decode()?;
let img_tensor = process_image(&img, processor, llava_config)?;
Ok(((img.width(), img.height()), img_tensor.to_dtype(dtype)?))
}
2 changes: 1 addition & 1 deletion candle-examples/examples/moondream/main.rs
@@ -208,7 +208,7 @@ struct Args {
/// Loads an image from disk using the image crate, this returns a tensor with shape
/// (3, 378, 378).
pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> candle::Result<Tensor> {
- let img = image::io::Reader::open(p)?
+ let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(378, 378, image::imageops::FilterType::Triangle); // Adjusted to 378x378
2 changes: 1 addition & 1 deletion candle-examples/examples/segment-anything/main.rs
@@ -139,7 +139,7 @@ pub fn main() -> anyhow::Result<()> {
let (_one, h, w) = mask.dims3()?;
let mask = mask.expand((3, h, w))?;

- let mut img = image::io::Reader::open(&args.image)?
+ let mut img = image::ImageReader::open(&args.image)?
.decode()
.map_err(candle::Error::wrap)?;
let mask_pixels = mask.permute((1, 2, 0))?.flatten_all()?.to_vec1::<u8>()?;
2 changes: 1 addition & 1 deletion candle-examples/examples/stable-diffusion/main.rs
@@ -380,7 +380,7 @@ fn text_embeddings(
}

fn image_preprocess<T: AsRef<std::path::Path>>(path: T) -> anyhow::Result<Tensor> {
- let img = image::io::Reader::open(path)?.decode()?;
+ let img = image::ImageReader::open(path)?.decode()?;
let (height, width) = (img.height() as usize, img.width() as usize);
let height = height - height % 32;
let width = width - width % 32;
2 changes: 1 addition & 1 deletion candle-examples/examples/trocr/image_processor.rs
@@ -145,7 +145,7 @@ impl ViTImageProcessor {
pub fn load_images(&self, image_path: Vec<&str>) -> Result<Vec<image::DynamicImage>> {
let mut images: Vec<image::DynamicImage> = Vec::new();
for path in image_path {
- let img = image::io::Reader::open(path)?.decode().unwrap();
+ let img = image::ImageReader::open(path)?.decode().unwrap();
images.push(img);
}

2 changes: 1 addition & 1 deletion candle-examples/examples/yolo-v3/main.rs
@@ -159,7 +159,7 @@ pub fn main() -> Result<()> {
let net_width = darknet.width()?;
let net_height = darknet.height()?;

- let original_image = image::io::Reader::open(&image_name)?
+ let original_image = image::ImageReader::open(&image_name)?
.decode()
.map_err(candle::Error::wrap)?;
let image = {
2 changes: 1 addition & 1 deletion candle-examples/examples/yolo-v8/main.rs
@@ -390,7 +390,7 @@ pub fn run<T: Task>(args: Args) -> anyhow::Result<()> {
for image_name in args.images.iter() {
println!("processing {image_name}");
let mut image_name = std::path::PathBuf::from(image_name);
- let original_image = image::io::Reader::open(&image_name)?
+ let original_image = image::ImageReader::open(&image_name)?
.decode()
.map_err(candle::Error::wrap)?;
let (width, height) = {
2 changes: 1 addition & 1 deletion candle-examples/src/imagenet.rs
@@ -3,7 +3,7 @@ use candle::{Device, Result, Tensor};
/// Loads an image from disk using the image crate at the requested resolution.
// This returns a tensor with shape (3, res, res). imagenet normalization is applied.
pub fn load_image<P: AsRef<std::path::Path>>(p: P, res: u32) -> Result<Tensor> {
- let img = image::io::Reader::open(p)?
+ let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(res, res, image::imageops::FilterType::Triangle);
4 changes: 2 additions & 2 deletions candle-examples/src/lib.rs
@@ -34,7 +34,7 @@ pub fn load_image<P: AsRef<std::path::Path>>(
p: P,
resize_longest: Option<usize>,
) -> Result<(Tensor, usize, usize)> {
- let img = image::io::Reader::open(p)?
+ let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?;
let (initial_h, initial_w) = (img.height() as usize, img.width() as usize);
@@ -65,7 +65,7 @@ pub fn load_image_and_resize<P: AsRef<std::path::Path>>(
width: usize,
height: usize,
) -> Result<Tensor> {
- let img = image::io::Reader::open(p)?
+ let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(
9 changes: 7 additions & 2 deletions candle-onnx/src/eval.rs
@@ -570,6 +570,11 @@ fn simple_eval_(
.map(|&i| {
if i == xs.rank() as i64 {
Ok(xs.rank())
+ } else if i < 0 {
+ // normalize_axis doesn't work correctly here
+ // because we actually want normalized with respect
+ // to the final size, not the current (off by one)
+ Ok(xs.rank() - (-i as usize) + 1)
} else {
xs.normalize_axis(i)
}
@@ -1040,8 +1045,8 @@ fn simple_eval_(
std::iter::repeat((min..max).chain((min + 1..=max).rev())).flatten()
}
let idx = if dim > 1 {
- let cycle_len = dim * 2 - 1;
- let skip = (pads_pre[i] as usize) % cycle_len;
+ let cycle_len = dim * 2 - 2;
+ let skip = cycle_len - ((pads_pre[i] as usize) % cycle_len);
let idx = zigzag(0, (dim - 1) as i64)
.skip(skip)
.take((pads_pre[i] as usize) + dim + (pads_post[i] as usize));
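
The Unsqueeze hunk above deserves a short gloss: a negative axis for Unsqueeze is defined relative to the output rank (input rank + 1), so it cannot be resolved with the generic `normalize_axis` helper, which works against the current rank and lands one position too early. A minimal sketch of the arithmetic (the `unsqueeze_axis` helper is illustrative only; candle-onnx inlines this logic and bounds-checks non-negative axes via `normalize_axis`):

```rust
// Illustrative helper mirroring the patched branch above.
fn unsqueeze_axis(rank: usize, i: i64) -> usize {
    if i < 0 {
        // A negative axis counts back from the output rank (rank + 1),
        // so -1 on a rank-3 tensor means "insert at position 3", not 2.
        rank - (-i as usize) + 1
    } else {
        i as usize
    }
}

fn main() {
    assert_eq!(unsqueeze_axis(3, -1), 3); // previously computed as 2
    assert_eq!(unsqueeze_axis(3, -3), 1);
    assert_eq!(unsqueeze_axis(3, 2), 2);
}
```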
72 changes: 67 additions & 5 deletions candle-onnx/tests/ops.rs
@@ -977,7 +977,59 @@ fn test_constant_of_shape() -> Result<()> {
}

// "Unsqueeze"
- // #[test]
+ #[test]
fn test_unsqueeze() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Unsqueeze".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
vec![
1.0f32, 2.0f32, //
3.0f32, 4.0f32, //
],
&[2, 2],
&Device::Cpu,
)?;
let y = Tensor::from_vec(vec![-1i64], &[1], &Device::Cpu)?;

let inputs = HashMap::from_iter([(INPUT_X.to_string(), x.clone()), (INPUT_Y.to_string(), y)]);

let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);

let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
assert_eq!(z.dims(), &[2, 2, 1]);
assert_eq!(
z.flatten_all()?.to_vec1::<f32>()?,
x.flatten_all()?.to_vec1::<f32>()?
);

Ok(())
}

// "Clip"
// #[test]
@@ -3268,13 +3320,23 @@ fn test_if() -> Result<()> {

#[test]
fn test_pad() -> Result<()> {
- let data = Tensor::from_vec(vec![1.0, 1.2, 2.3, 3.4, 4.5, 5.7], (3, 2), &Device::Cpu)?;
- let pads = Tensor::from_vec(vec![0i64, 2, 0, 0], (4,), &Device::Cpu)?;
+ let data = Tensor::from_vec(
+ vec![
+ 1.0, 2.0, 3.0, //
+ 4.0, 5.0, 6.0, //
+ ],
+ (2, 3),
+ &Device::Cpu,
+ )?;
+ let pads = Tensor::from_vec(vec![0i64, 1, 0, 0], (4,), &Device::Cpu)?;
let mode = "reflect";

let expected = Tensor::from_vec(
- vec![1.0, 1.2, 1.0, 1.2, 2.3, 3.4, 2.3, 3.4, 4.5, 5.7, 4.5, 5.7],
- (3, 4),
+ vec![
+ 2.0, 1.0, 2.0, 3.0, //
+ 5.0, 4.0, 5.0, 6.0, //
+ ],
+ (2, 4),
&Device::Cpu,
)?;

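
For reference on the updated expectation in `test_pad`: with mode "reflect" and pads `[0, 1, 0, 0]` (one leading pad on the last axis, nothing else), each row is mirrored around its first element, so `[1, 2, 3]` becomes `[2, 1, 2, 3]` and `[4, 5, 6]` becomes `[5, 4, 5, 6]`, which is exactly the `(2, 4)` expected tensor above.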
2 changes: 1 addition & 1 deletion candle-wasm-examples/blip/src/bin/m.rs
@@ -124,7 +124,7 @@ impl Model {
impl Model {
fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> {
let device = &Device::Cpu;
- let img = image::io::Reader::new(std::io::Cursor::new(image))
+ let img = image::ImageReader::new(std::io::Cursor::new(image))
.with_guessed_format()?
.decode()
.map_err(|e| JsError::new(&e.to_string()))?
2 changes: 1 addition & 1 deletion candle-wasm-examples/moondream/src/bin/m.rs
@@ -195,7 +195,7 @@ impl Model {
}
impl Model {
fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> {
- let img = image::io::Reader::new(std::io::Cursor::new(image))
+ let img = image::ImageReader::new(std::io::Cursor::new(image))
.with_guessed_format()?
.decode()
.map_err(|e| JsError::new(&e.to_string()))?
2 changes: 1 addition & 1 deletion candle-wasm-examples/segment-anything/src/bin/m.rs
@@ -38,7 +38,7 @@ impl Model {
pub fn set_image_embeddings(&mut self, image_data: Vec<u8>) -> Result<(), JsError> {
sam::console_log!("image data: {}", image_data.len());
let image_data = std::io::Cursor::new(image_data);
- let image = image::io::Reader::new(image_data)
+ let image = image::ImageReader::new(image_data)
.with_guessed_format()?
.decode()
.map_err(candle::Error::wrap)?;
4 changes: 2 additions & 2 deletions candle-wasm-examples/yolo/src/worker.rs
@@ -48,7 +48,7 @@ impl Model {
) -> Result<Vec<Vec<Bbox>>> {
console_log!("image data: {}", image_data.len());
let image_data = std::io::Cursor::new(image_data);
- let original_image = image::io::Reader::new(image_data)
+ let original_image = image::ImageReader::new(image_data)
.with_guessed_format()?
.decode()
.map_err(candle::Error::wrap)?;
@@ -127,7 +127,7 @@ impl ModelPose {
) -> Result<Vec<Bbox>> {
console_log!("image data: {}", image_data.len());
let image_data = std::io::Cursor::new(image_data);
- let original_image = image::io::Reader::new(image_data)
+ let original_image = image::ImageReader::new(image_data)
.with_guessed_format()?
.decode()
.map_err(candle::Error::wrap)?;
