@@ -539,6 +539,7 @@ impl<T: ChunkReader + 'static> ParquetRecordBatchReaderBuilder<T> {
         Ok(Self::new_with_metadata(reader, metadata))
     }
 
+    /// Create a new [`ParquetRecordBatchReaderBuilder`] with [`ArrowReaderOptions`] and [`FileDecryptionProperties`]
     pub fn try_new_with_decryption(
         reader: T,
         options: ArrowReaderOptions,
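For orientation, here is a minimal sketch of how the new builder constructor could be called. The third parameter (assumed here to be `Option<FileDecryptionProperties>`), the `parquet::encryption::decrypt` module path, and the `FileDecryptionProperties::builder(..).build()` construction are assumptions not visible in this hunk:

```rust
use std::fs::File;

use parquet::arrow::arrow_reader::{ArrowReaderOptions, ParquetRecordBatchReaderBuilder};
// Module path for the decryption properties is an assumption.
use parquet::encryption::decrypt::FileDecryptionProperties;

fn read_encrypted(file: File, footer_key: Vec<u8>) -> parquet::errors::Result<()> {
    // Assumed builder-style construction of the decryption properties.
    let decryption_properties = FileDecryptionProperties::builder(footer_key).build()?;
    let reader = ParquetRecordBatchReaderBuilder::try_new_with_decryption(
        file,
        ArrowReaderOptions::default(),
        Some(decryption_properties), // assumed optional third parameter
    )?
    .build()?;
    for batch in reader {
        println!("{} rows", batch?.num_rows());
    }
    Ok(())
}
```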
@@ -569,6 +570,7 @@ impl<T: ChunkReader + 'static> ParquetRecordBatchReaderBuilder<T> {
     /// # use arrow_schema::{DataType, Field, Schema};
     /// # use parquet::arrow::arrow_reader::{ArrowReaderMetadata, ParquetRecordBatchReader, ParquetRecordBatchReaderBuilder};
     /// # use parquet::arrow::ArrowWriter;
+    /// #
     /// # let mut file: Vec<u8> = Vec::with_capacity(1024);
     /// # let schema = Arc::new(Schema::new(vec![Field::new("i32", DataType::Int32, false)]));
     /// # let mut writer = ArrowWriter::try_new(&mut file, schema.clone(), None).unwrap();
@@ -577,7 +579,7 @@ impl<T: ChunkReader + 'static> ParquetRecordBatchReaderBuilder<T> {
     /// # writer.close().unwrap();
     /// # let file = Bytes::from(file);
     /// #
-    /// let metadata = ArrowReaderMetadata::load(&file, Default::default()).unwrap();
+    /// let metadata = ArrowReaderMetadata::load(&file, Default::default(), None).unwrap();
     /// let mut a = ParquetRecordBatchReaderBuilder::new_with_metadata(file.clone(), metadata.clone()).build().unwrap();
     /// let mut b = ParquetRecordBatchReaderBuilder::new_with_metadata(file, metadata).build().unwrap();
     ///
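The extra `None` in this doc-test reflects the new third parameter of `ArrowReaderMetadata::load`, which per this change carries the optional decryption properties; `None` preserves the previous unencrypted behaviour. A hedged sketch of both forms (whether the encrypted form takes the properties by value or by reference is not visible in this hunk):

```rust
// Unencrypted file: no decryption properties are needed.
let metadata = ArrowReaderMetadata::load(&file, Default::default(), None).unwrap();

// Encrypted file (assumed form): supply the decryption properties instead of None.
// let metadata = ArrowReaderMetadata::load(&file, Default::default(), Some(decryption_properties)).unwrap();
```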
@@ -804,6 +806,9 @@ impl ParquetRecordBatchReader {
             .build()
     }
 
+    /// Create a new [`ParquetRecordBatchReader`] from the provided chunk reader and [`FileDecryptionProperties`]
+    ///
+    /// Note: this is needed when the parquet file is encrypted
     pub fn try_new_with_decryption<T: ChunkReader + 'static>(
         reader: T,
         batch_size: usize,
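A minimal sketch of the convenience constructor added to `ParquetRecordBatchReader`; the trailing decryption-properties parameter is cut off in this hunk, so its exact shape (assumed `Option<FileDecryptionProperties>` here) and the property construction are assumptions:

```rust
use bytes::Bytes;

use parquet::arrow::arrow_reader::ParquetRecordBatchReader;
// Module path for the decryption properties is an assumption.
use parquet::encryption::decrypt::FileDecryptionProperties;

fn read_encrypted_batches(data: Bytes, footer_key: Vec<u8>) -> parquet::errors::Result<()> {
    // Assumed builder-style construction of the decryption properties.
    let decryption_properties = FileDecryptionProperties::builder(footer_key).build()?;
    let reader = ParquetRecordBatchReader::try_new_with_decryption(
        data,
        1024,                        // batch_size
        Some(decryption_properties), // assumed optional trailing parameter
    )?;
    for batch in reader {
        println!("read {} rows", batch?.num_rows());
    }
    Ok(())
}
```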