@@ -586,24 +586,32 @@ int unifyfs_logio_alloc(logio_context* ctx,
             return UNIFYFS_SUCCESS;
         }

-        /* could not get full allocation in shmem, reserve any available
-         * chunks at the end of the shmem log */
-        size_t log_end_chunks = chunkmap->total_slots -
-            (chunkmap->last_used_slot + 1);
-        if (log_end_chunks > 0) {
-            res_chunks = log_end_chunks;
-            res_slot = slotmap_reserve(chunkmap, res_chunks);
-            if (-1 != res_slot) {
-                /* reserved all chunks at end of shmem log */
-                allocated_bytes = res_chunks * chunk_sz;
-                needed_bytes -= allocated_bytes;
-                res_off = (off_t)(res_slot * chunk_sz);
-                mem_allocation = allocated_bytes;
-                mem_res_slot = res_slot;
-                mem_res_nchk = res_chunks;
-                mem_res_at_end = 1;
+        if (NULL != ctx->spill_hdr) {
+            /* could not get full allocation in shmem, try to reserve any
+             * available chunks at the end of the shmem log before asking
+             * for chunks at the beginning of the spill log */
+            size_t log_end_chunks = chunkmap->total_slots -
+                (chunkmap->last_used_slot + 1);
+            if (log_end_chunks > 0) {
+                res_chunks = log_end_chunks;
+                res_slot = slotmap_reserve(chunkmap, res_chunks);
+                if (-1 != res_slot) {
+                    /* reserved all chunks at end of shmem log */
+                    allocated_bytes = res_chunks * chunk_sz;
+                    needed_bytes -= allocated_bytes;
+                    res_off = (off_t)(res_slot * chunk_sz);
+                    mem_allocation = allocated_bytes;
+                    mem_res_slot = res_slot;
+                    mem_res_nchk = res_chunks;
+                    mem_res_at_end = 1;
+                }
             }
-        } else {
+        }
+
+        /* NOTE: when we have a reservation of chunks at the end of the shmem
+         *       log we don't unlock the shmem_hdr here because we need to
+         *       keep it locked through the spill alloc attempt */
+        if (!mem_res_at_end) {
             UNLOCK_LOG_HEADER(shmem_hdr);
         }
     }
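
To make the control flow easier to follow, here is a rough, self-contained sketch of the pattern the hunk implements; it is not UnifyFS code, and all names in it (pool_t, pool_reserve, alloc_chunks) are invented for illustration. The idea is: satisfy as much of the request as possible from the in-memory log, and when only part of it fits there, keep the memory log's lock held while the remainder is reserved from the spill log, matching the NOTE in the patch about keeping shmem_hdr locked through the spill alloc attempt.

/* Hypothetical sketch of the shmem-then-spill allocation pattern.
 * None of these names come from UnifyFS. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    size_t total_chunks;
    size_t used_chunks;   /* chunks handed out so far */
} pool_t;

/* Reserve up to 'want' chunks; returns how many were actually reserved. */
static size_t pool_reserve(pool_t* p, size_t want)
{
    size_t avail = p->total_chunks - p->used_chunks;
    size_t got = (want < avail) ? want : avail;
    p->used_chunks += got;
    return got;
}

/* Allocate 'nchunks' chunks, preferring memory and spilling the rest. */
static bool alloc_chunks(pool_t* mem, pool_t* spill, size_t nchunks)
{
    pthread_mutex_lock(&mem->lock);
    size_t from_mem = pool_reserve(mem, nchunks);
    if (from_mem == nchunks) {
        /* fully satisfied from memory, nothing to spill */
        pthread_mutex_unlock(&mem->lock);
        return true;
    }

    /* Partial memory reservation: hold the memory lock while trying the
     * spill pool, so the tail reservation and the spill reservation are
     * decided together. */
    pthread_mutex_lock(&spill->lock);
    size_t from_spill = pool_reserve(spill, nchunks - from_mem);
    bool ok = ((from_mem + from_spill) == nchunks);
    if (!ok) {
        /* roll back both partial reservations on failure */
        mem->used_chunks -= from_mem;
        spill->used_chunks -= from_spill;
    }
    pthread_mutex_unlock(&spill->lock);
    pthread_mutex_unlock(&mem->lock);
    return ok;
}

int main(void)
{
    pool_t mem   = { .total_chunks = 4,  .used_chunks = 3 };  /* one chunk left */
    pool_t spill = { .total_chunks = 16, .used_chunks = 0 };
    pthread_mutex_init(&mem.lock, NULL);
    pthread_mutex_init(&spill.lock, NULL);

    printf("alloc 3 chunks: %s\n", alloc_chunks(&mem, &spill, 3) ? "ok" : "failed");
    return 0;
}

In the sketch, as in the patch's NOTE, the memory pool's lock is released only after the spill attempt, so the partial tail reservation cannot be observed or raced against while the spill side is still undecided.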