@@ -217,17 +217,6 @@ mod unix_test {
             .set_bool("datafusion.execution.coalesce_batches", false)
             .with_target_partitions(1);
         let ctx = SessionContext::new_with_config(config);
-        // Tasks
-        let mut tasks: Vec<JoinHandle<()>> = vec![];
-
-        // Join filter
-        let a1_iter = 0..TEST_DATA_SIZE;
-        // Join key
-        let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10);
-        let lines = a1_iter
-            .zip(a2_iter)
-            .map(|(a1, a2)| format!("{a1},{a2}\n"))
-            .collect::<Vec<_>>();

         // Create a new temporary FIFO file
         let tmp_dir = TempDir::new()?;
@@ -238,22 +227,6 @@ mod unix_test {
         // Create a mutex for tracking if the right input source is waiting for data.
         let waiting = Arc::new(AtomicBool::new(true));

-        // Create writing threads for the left and right FIFO files
-        tasks.push(create_writing_thread(
-            left_fifo.clone(),
-            "a1,a2\n".to_owned(),
-            lines.clone(),
-            waiting.clone(),
-            TEST_BATCH_SIZE,
-        ));
-        tasks.push(create_writing_thread(
-            right_fifo.clone(),
-            "a1,a2\n".to_owned(),
-            lines.clone(),
-            waiting.clone(),
-            TEST_BATCH_SIZE,
-        ));
-
         // Create schema
         let schema = Arc::new(Schema::new(vec![
             Field::new("a1", DataType::UInt32, false),
@@ -264,10 +237,10 @@ mod unix_test {
         let order = vec![vec![datafusion_expr::col("a1").sort(true, false)]];

         // Set unbounded sorted files read configuration
-        let provider = fifo_table(schema.clone(), left_fifo, order.clone());
+        let provider = fifo_table(schema.clone(), left_fifo.clone(), order.clone());
         ctx.register_table("left", provider)?;

-        let provider = fifo_table(schema.clone(), right_fifo, order);
+        let provider = fifo_table(schema.clone(), right_fifo.clone(), order);
         ctx.register_table("right", provider)?;

         // Execute the query, with no matching rows. (since key is modulus 10)
@@ -287,6 +260,34 @@ mod unix_test {
             .await?;
         let mut stream = df.execute_stream().await?;
         let mut operations = vec![];
+
+        // Tasks
+        let mut tasks: Vec<JoinHandle<()>> = vec![];
+
+        // Join filter
+        let a1_iter = 0..TEST_DATA_SIZE;
+        // Join key
+        let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10);
+        let lines = a1_iter
+            .zip(a2_iter)
+            .map(|(a1, a2)| format!("{a1},{a2}\n"))
+            .collect::<Vec<_>>();
+
+        // Create writing threads for the left and right FIFO files
+        tasks.push(create_writing_thread(
+            left_fifo,
+            "a1,a2\n".to_owned(),
+            lines.clone(),
+            waiting.clone(),
+            TEST_BATCH_SIZE,
+        ));
+        tasks.push(create_writing_thread(
+            right_fifo,
+            "a1,a2\n".to_owned(),
+            lines.clone(),
+            waiting.clone(),
+            TEST_BATCH_SIZE,
+        ));
         // Partial.
         while let Some(Ok(batch)) = stream.next().await {
             waiting.store(false, Ordering::SeqCst);
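For context, the `create_writing_thread` helper called in the moved block is defined elsewhere in the test module and is not part of this diff. The call sites fix its shape: a FIFO path, a CSV header, the data lines, a shared `AtomicBool` "waiting" flag, and a batch size, returning a tokio `JoinHandle<()>`. A minimal sketch of a helper with that interface might look like the following; the function body, the throttling policy, and the use of `spawn_blocking` are assumptions for illustration, not the actual DataFusion implementation:

```rust
use std::{
    fs::OpenOptions,
    io::Write,
    path::PathBuf,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    thread,
    time::Duration,
};
use tokio::task::JoinHandle;

// Hypothetical reconstruction of a FIFO writer helper with the same call
// shape as the one used in the test above.
fn create_writing_thread(
    file_path: PathBuf,
    header: String,
    lines: Vec<String>,
    waiting_lock: Arc<AtomicBool>,
    batch_size: usize,
) -> JoinHandle<()> {
    tokio::task::spawn_blocking(move || {
        // Opening a FIFO for writing blocks until a reader opens the other
        // end, so this task only makes progress once the query stream has
        // started consuming the file.
        let mut file = OpenOptions::new()
            .write(true)
            .open(&file_path)
            .expect("cannot open FIFO for writing");
        file.write_all(header.as_bytes()).expect("write header");
        for (i, line) in lines.iter().enumerate() {
            // After the first batch, back off while the join still reports
            // that it is waiting on input (assumed throttling policy).
            if i > 0 && i % batch_size == 0 {
                while waiting_lock.load(Ordering::SeqCst) {
                    thread::sleep(Duration::from_millis(10));
                }
            }
            file.write_all(line.as_bytes()).expect("write line");
        }
    })
}
```

The diff itself only reorders the test: the data lines and writer tasks are now created after `df.execute_stream().await?` has produced the output stream, rather than before the query is registered and executed. The sketch above is just an illustration of the writer interface that reordered block relies on.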