diff --git a/Kernel/Bus/USB/UHCI/UHCIController.cpp b/Kernel/Bus/USB/UHCI/UHCIController.cpp index 9a9e0a701f8c47..51983d45928e50 100644 --- a/Kernel/Bus/USB/UHCI/UHCIController.cpp +++ b/Kernel/Bus/USB/UHCI/UHCIController.cpp @@ -109,7 +109,8 @@ ErrorOr<void> UHCIController::reset() } // Let's allocate the physical page for the Frame List (which is 4KiB aligned) - m_framelist = TRY(MM.allocate_dma_buffer_page("UHCI Framelist"sv, Memory::Region::Access::Write)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + m_framelist = TRY(MM.allocate_dma_buffer_page("UHCI Framelist"sv, Memory::Region::Access::Write, Memory::MemoryType::IO)); dbgln("UHCI: Allocated framelist at physical address {}", m_framelist->physical_page(0)->paddr()); dbgln("UHCI: Framelist is at virtual address {}", m_framelist->vaddr()); write_sofmod(64); // 1mS frame time @@ -145,7 +146,8 @@ UNMAP_AFTER_INIT ErrorOr<void> UHCIController::create_structures() // Now the Transfer Descriptor pool m_transfer_descriptor_pool = TRY(UHCIDescriptorPool<TransferDescriptor>::try_create("Transfer Descriptor Pool"sv)); - m_isochronous_transfer_pool = TRY(MM.allocate_dma_buffer_page("UHCI Isochronous Descriptor Pool"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable.
+ m_isochronous_transfer_pool = TRY(MM.allocate_dma_buffer_page("UHCI Isochronous Descriptor Pool"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); // Set up the Isochronous Transfer Descriptor list m_iso_td_list.resize(UHCI_NUMBER_OF_ISOCHRONOUS_TDS); diff --git a/Kernel/Bus/USB/USBPipe.cpp b/Kernel/Bus/USB/USBPipe.cpp index a4c8d54744c277..3906e5f285161b 100644 --- a/Kernel/Bus/USB/USBPipe.cpp +++ b/Kernel/Bus/USB/USBPipe.cpp @@ -34,7 +34,8 @@ ErrorOr<void> Pipe::clear_halt() ErrorOr<NonnullOwnPtr<ControlPipe>> ControlPipe::create(USBController const& controller, Device& device, u8 endpoint_number, u16 max_packet_size, size_t buffer_size) { - auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB device DMA buffer"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB device DMA buffer"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); return adopt_nonnull_own_or_enomem(new (nothrow) ControlPipe(controller, device, endpoint_number, max_packet_size, move(dma_buffer))); } @@ -74,7 +75,8 @@ ErrorOr<size_t> ControlPipe::submit_control_transfer(u8 request_type, u8 request ErrorOr<NonnullOwnPtr<BulkInPipe>> BulkInPipe::create(USBController const& controller, Device& device, u8 endpoint_number, u16 max_packet_size, size_t buffer_size) { VERIFY(buffer_size >= max_packet_size); - auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable.
+ auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); return adopt_nonnull_own_or_enomem(new (nothrow) BulkInPipe(controller, device, endpoint_number, max_packet_size, move(dma_buffer))); } @@ -120,7 +122,8 @@ ErrorOr<size_t> BulkInPipe::submit_bulk_in_transfer(size_t length, UserOrKernelB ErrorOr<NonnullOwnPtr<BulkOutPipe>> BulkOutPipe::create(USBController const& controller, Device& device, u8 endpoint_number, u16 max_packet_size, size_t buffer_size) { VERIFY(buffer_size >= max_packet_size); - auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); return adopt_nonnull_own_or_enomem(new (nothrow) BulkOutPipe(controller, device, endpoint_number, max_packet_size, move(dma_buffer))); } @@ -166,7 +169,8 @@ ErrorOr<size_t> BulkOutPipe::submit_bulk_out_transfer(size_t length, UserOrKerne ErrorOr<NonnullOwnPtr<InterruptInPipe>> InterruptInPipe::create(USBController const& controller, Device& device, u8 endpoint_number, u16 max_packet_size, u16 poll_interval, size_t buffer_size) { VERIFY(buffer_size >= max_packet_size); - auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable.
+ auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); return adopt_nonnull_own_or_enomem(new (nothrow) InterruptInPipe(controller, device, endpoint_number, max_packet_size, poll_interval, move(dma_buffer))); } @@ -189,7 +193,8 @@ ErrorOr<NonnullLockRefPtr<Transfer>> InterruptInPipe::submit_interrupt_in_transf ErrorOr<NonnullOwnPtr<InterruptOutPipe>> InterruptOutPipe::create(USBController const& controller, Device& device, u8 endpoint_number, u16 max_packet_size, u16 poll_interval, size_t buffer_size) { VERIFY(buffer_size >= max_packet_size); - auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto dma_buffer = TRY(MM.allocate_dma_buffer_pages(TRY(Memory::page_round_up(buffer_size)), "USB pipe DMA buffer"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); return adopt_nonnull_own_or_enomem(new (nothrow) InterruptOutPipe(controller, device, endpoint_number, max_packet_size, poll_interval, move(dma_buffer))); } diff --git a/Kernel/Bus/USB/xHCI/xHCIController.cpp b/Kernel/Bus/USB/xHCI/xHCIController.cpp index f85874edb26e9a..9555e1ca54dcb5 100644 --- a/Kernel/Bus/USB/xHCI/xHCIController.cpp +++ b/Kernel/Bus/USB/xHCI/xHCIController.cpp @@ -216,7 +216,8 @@ ErrorOr<void> xHCIController::initialize() m_operational_registers.configure.max_device_slots_enabled = m_device_slots; // 2. Program the Device Context Base Address Array Pointer (DCBAAP) register (5.4.6) with a 64-bit address pointing to where the Device Context Base Address Array is located.
- m_device_context_base_address_array_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up((m_device_slots + 1) * sizeof(u64))), "xHCI Device Context Base Address Array"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + m_device_context_base_address_array_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up((m_device_slots + 1) * sizeof(u64))), "xHCI Device Context Base Address Array"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); dbgln_if(XHCI_DEBUG, "xHCI: Device Context Base Address Array - {} / {}", m_device_context_base_address_array_region->vaddr(), m_device_context_base_address_array_region->physical_page(0)->paddr()); m_device_context_base_address_array = reinterpret_cast<u64*>(m_device_context_base_address_array_region->vaddr().as_ptr()); auto requested_scratchpad_buffers = (m_capability_registers.structural_parameters.max_scratchpad_buffers_high << 5) | m_capability_registers.structural_parameters.max_scratchpad_buffers_low; @@ -238,7 +239,8 @@ ErrorOr<void> xHCIController::initialize() m_operational_registers.device_context_base_address_array_pointer.high = device_context_base_address_array_pointer >> 32; // 3. Define the Command Ring Dequeue Pointer by programming the Command Ring Control Register (5.4.5) with a 64-bit address pointing to the starting address of the first TRB of the Command Ring. - m_command_and_event_rings_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(sizeof(CommandAndEventRings))), "xHCI Command and Event Rings"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable.
+ m_command_and_event_rings_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(sizeof(CommandAndEventRings))), "xHCI Command and Event Rings"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); dbgln_if(XHCI_DEBUG, "xHCI: Command and Event Rings - {} / {}", m_command_and_event_rings_region->vaddr(), m_command_and_event_rings_region->physical_page(0)->paddr()); auto command_and_event_rings_region_virtual_address = m_command_and_event_rings_region->vaddr().get(); m_command_ring = reinterpret_cast<TransferRequestBlock*>(command_and_event_rings_region_virtual_address + __builtin_offsetof(CommandAndEventRings, command_ring)); @@ -545,7 +547,8 @@ ErrorOr<void> xHCIController::initialize_device(USB::Device& device) // 5. After successfully obtaining a Device Slot, system software shall initialize the data structures associated with the slot as described in section 4.3.3. // 1. Allocate an Input Context data structure (6.2.5) and initialize all fields to ‘0’. - slot_state.input_context_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(input_context_size())), "xHCI Input Context"sv, Memory::Region::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + slot_state.input_context_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(input_context_size())), "xHCI Input Context"sv, Memory::Region::ReadWrite, Memory::MemoryType::IO)); // 2. Initialize the Input Control Context (6.2.5.1) of the Input Context by setting the A0 and A1 flags to ‘1’. // These flags indicate that the Slot Context and the Endpoint 0 Context of the Input Context are affected by the command. @@ -623,7 +626,8 @@ ErrorOr<void> xHCIController::initialize_device(USB::Device& device) // 4. Allocate and initialize the Transfer Ring for the Default Control Endpoint.
// Refer to section 4.9 for TRB Ring initialization requirements and to section 6.4 for the formats of TRBs - slot_state.endpoint_rings[0].region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(endpoint_ring_size * sizeof(TransferRequestBlock))), "xHCI Endpoint Rings"sv, Memory::Region::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + slot_state.endpoint_rings[0].region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(endpoint_ring_size * sizeof(TransferRequestBlock))), "xHCI Endpoint Rings"sv, Memory::Region::ReadWrite, Memory::MemoryType::IO)); auto* endpoint_ring_memory = slot_state.endpoint_rings[0].ring_vaddr(); endpoint_ring_memory[endpoint_ring_size - 1].generic.transfer_request_block_type = TransferRequestBlock::TRBType::Link; auto endpoint_ring_address = slot_state.endpoint_rings[0].ring_paddr(); @@ -671,7 +675,8 @@ ErrorOr xHCIController::initialize_device(USB::Device& device) endpoint_context->average_transfer_request_block = 8; // 6. Allocate the Output Device Context data structure (6.2.1) and initialize it to ‘0’. - slot_state.device_context_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(device_context_size())), "xHCI Device Context"sv, Memory::Region::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + slot_state.device_context_region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(device_context_size())), "xHCI Device Context"sv, Memory::Region::ReadWrite, Memory::MemoryType::IO)); // 7. Load the appropriate (Device Slot ID) entry in the Device Context Base Address Array (5.4.6) with a pointer to the Output Device Context data structure (6.2.1). 
m_device_context_base_address_array[slot] = slot_state.device_context_region->physical_page(0)->paddr().get(); @@ -1000,7 +1005,8 @@ ErrorOr xHCIController::initialize_endpoint_if_needed(Pipe const& pipe) if (endpoint_ring.region) return {}; // Already initialized - endpoint_ring.region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(endpoint_ring_size * sizeof(TransferRequestBlock))), "xHCI Endpoint Rings"sv, Memory::Region::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + endpoint_ring.region = TRY(MM.allocate_dma_buffer_pages(MUST(Memory::page_round_up(endpoint_ring_size * sizeof(TransferRequestBlock))), "xHCI Endpoint Rings"sv, Memory::Region::ReadWrite, Memory::MemoryType::IO)); auto* endpoint_ring_memory = endpoint_ring.ring_vaddr(); endpoint_ring_memory[endpoint_ring_size - 1].generic.transfer_request_block_type = TransferRequestBlock::TRBType::Link; auto endpoint_ring_address = endpoint_ring.ring_paddr(); diff --git a/Kernel/Devices/Audio/AC97/AC97.cpp b/Kernel/Devices/Audio/AC97/AC97.cpp index a2f5d3434e350d..1a48f67ee8cf26 100644 --- a/Kernel/Devices/Audio/AC97/AC97.cpp +++ b/Kernel/Devices/Audio/AC97/AC97.cpp @@ -224,13 +224,16 @@ ErrorOr AC97::write(size_t channel_index, UserOrKernelBuffer const& data if (channel_index != 0) return Error::from_errno(ENODEV); - if (!m_output_buffer) - m_output_buffer = TRY(MM.allocate_dma_buffer_pages(m_output_buffer_page_count * PAGE_SIZE, "AC97 Output buffer"sv, Memory::Region::Access::Write)); + if (!m_output_buffer) { + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ m_output_buffer = TRY(MM.allocate_dma_buffer_pages(m_output_buffer_page_count * PAGE_SIZE, "AC97 Output buffer"sv, Memory::Region::Access::Write, Memory::MemoryType::IO)); + } if (!m_buffer_descriptor_list) { size_t buffer_descriptor_list_size = buffer_descriptor_list_max_entries * sizeof(BufferDescriptorListEntry); buffer_descriptor_list_size = TRY(Memory::page_round_up(buffer_descriptor_list_size)); - m_buffer_descriptor_list = TRY(MM.allocate_dma_buffer_pages(buffer_descriptor_list_size, "AC97 Buffer Descriptor List"sv, Memory::Region::Access::Write)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + m_buffer_descriptor_list = TRY(MM.allocate_dma_buffer_pages(buffer_descriptor_list_size, "AC97 Buffer Descriptor List"sv, Memory::Region::Access::Write, Memory::MemoryType::IO)); } Checked remaining = length; diff --git a/Kernel/Devices/Audio/IntelHDA/RingBuffer.h b/Kernel/Devices/Audio/IntelHDA/RingBuffer.h index cb9b3d2c2f76b2..4e0d60bbd4c19c 100644 --- a/Kernel/Devices/Audio/IntelHDA/RingBuffer.h +++ b/Kernel/Devices/Audio/IntelHDA/RingBuffer.h @@ -45,7 +45,8 @@ class ControllerRingBuffer { // Create a DMA buffer page to holds the ring buffer VERIFY(PAGE_SIZE >= capacity * sizeof(T)); - auto buffer_region = TRY(MM.allocate_dma_buffer_page(name, U == RingBufferType::Input ? Memory::Region::Access::Read : Memory::Region::Access::Write)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto buffer_region = TRY(MM.allocate_dma_buffer_page(name, U == RingBufferType::Input ? Memory::Region::Access::Read : Memory::Region::Access::Write, Memory::MemoryType::IO)); // 4.4.1.1, 4.4.2: The CORB buffer in memory must be allocated to start on a 128-byte boundary // and in memory configured to match the access type being used. 
diff --git a/Kernel/Devices/Audio/IntelHDA/Stream.cpp b/Kernel/Devices/Audio/IntelHDA/Stream.cpp index a2fc7e07c663be..30d171b2c8b5ed 100644 --- a/Kernel/Devices/Audio/IntelHDA/Stream.cpp +++ b/Kernel/Devices/Audio/IntelHDA/Stream.cpp @@ -67,7 +67,8 @@ ErrorOr<void> Stream::initialize_buffer() size_t cyclic_buffer_size_in_bytes = number_of_buffers_required_for_cyclic_buffer_size * PAGE_SIZE; TRY(m_buffers.with([&](auto& buffers) -> ErrorOr<void> { - buffers = TRY(MM.allocate_dma_buffer_pages(cyclic_buffer_size_in_bytes, "IntelHDA Stream Buffers"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + buffers = TRY(MM.allocate_dma_buffer_pages(cyclic_buffer_size_in_bytes, "IntelHDA Stream Buffers"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); // 3.3.38 Input/Output/Bidirectional Stream Descriptor Cyclic Buffer Length m_stream_io_window->write32(StreamRegisterOffset::CyclicBufferLength, buffers->size()); @@ -76,7 +77,8 @@ ErrorOr<void> Stream::initialize_buffer() m_stream_io_window->write16(StreamRegisterOffset::LastValidIndex, number_of_buffers_required_for_cyclic_buffer_size - 1); // 3.6.2: Buffer Descriptor List - m_buffer_descriptor_list = TRY(MM.allocate_dma_buffer_page("IntelHDA Stream BDL"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable.
+ m_buffer_descriptor_list = TRY(MM.allocate_dma_buffer_page("IntelHDA Stream BDL"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); auto bdl_physical_address = m_buffer_descriptor_list->physical_page(0)->paddr().get(); m_stream_io_window->write32(StreamRegisterOffset::BDLLowerBaseAddress, bdl_physical_address & 0xffffffffu); m_stream_io_window->write32(StreamRegisterOffset::BDLUpperBaseAddress, bdl_physical_address >> 32); diff --git a/Kernel/Devices/Storage/AHCI/Port.cpp b/Kernel/Devices/Storage/AHCI/Port.cpp index 36ab4fbd3d2fd4..cc5c6c5c5fb2e8 100644 --- a/Kernel/Devices/Storage/AHCI/Port.cpp +++ b/Kernel/Devices/Storage/AHCI/Port.cpp @@ -46,7 +46,8 @@ ErrorOr AHCIPort::allocate_resources_and_initialize_ports() m_command_table_pages.append(move(command_table_page)); } - m_command_list_region = TRY(MM.allocate_dma_buffer_page("AHCI Port Command List"sv, Memory::Region::Access::ReadWrite, m_command_list_page)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ m_command_list_region = TRY(MM.allocate_dma_buffer_page("AHCI Port Command List"sv, Memory::Region::Access::ReadWrite, m_command_list_page, Memory::MemoryType::IO)); dbgln_if(AHCI_DEBUG, "AHCI Port {}: Command list page at {}", representative_port_index(), m_command_list_page->paddr()); dbgln_if(AHCI_DEBUG, "AHCI Port {}: FIS receive page at {}", representative_port_index(), m_fis_receive_page->paddr()); diff --git a/Kernel/Devices/Storage/NVMe/NVMeController.cpp b/Kernel/Devices/Storage/NVMe/NVMeController.cpp index 9dbc7651789863..0456c3f9cffd55 100644 --- a/Kernel/Devices/Storage/NVMe/NVMeController.cpp +++ b/Kernel/Devices/Storage/NVMe/NVMeController.cpp @@ -161,7 +161,8 @@ UNMAP_AFTER_INIT ErrorOr NVMeController::identify_and_init_namespaces() u32 active_namespace_list[NVMe_IDENTIFY_SIZE / sizeof(u32)]; { - auto buffer = TRY(MM.allocate_dma_buffer_page("Identify PRP"sv, Memory::Region::Access::ReadWrite, prp_dma_buffer)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto buffer = TRY(MM.allocate_dma_buffer_page("Identify PRP"sv, Memory::Region::Access::ReadWrite, prp_dma_buffer, Memory::MemoryType::IO)); prp_dma_region = move(buffer); } @@ -224,7 +225,8 @@ ErrorOr NVMeController::identify_and_init_controller() IdentifyController ctrl {}; { - auto buffer = TRY(MM.allocate_dma_buffer_page("Identify PRP"sv, Memory::Region::Access::ReadWrite, prp_dma_buffer)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ auto buffer = TRY(MM.allocate_dma_buffer_page("Identify PRP"sv, Memory::Region::Access::ReadWrite, prp_dma_buffer, Memory::MemoryType::IO)); prp_dma_region = move(buffer); } @@ -250,13 +252,15 @@ ErrorOr NVMeController::identify_and_init_controller() OwnPtr eventidx_dma_region; { - auto buffer = TRY(MM.allocate_dma_buffer_page("shadow dbbuf"sv, Memory::Region::Access::ReadWrite, m_dbbuf_shadow_page)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto buffer = TRY(MM.allocate_dma_buffer_page("shadow dbbuf"sv, Memory::Region::Access::ReadWrite, m_dbbuf_shadow_page, Memory::MemoryType::IO)); dbbuf_dma_region = move(buffer); memset(dbbuf_dma_region->vaddr().as_ptr(), 0, PAGE_SIZE); } { - auto buffer = TRY(MM.allocate_dma_buffer_page("eventidx"sv, Memory::Region::Access::ReadWrite, m_dbbuf_eventidx_page)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto buffer = TRY(MM.allocate_dma_buffer_page("eventidx"sv, Memory::Region::Access::ReadWrite, m_dbbuf_eventidx_page, Memory::MemoryType::IO)); eventidx_dma_region = move(buffer); memset(eventidx_dma_region->vaddr().as_ptr(), 0, PAGE_SIZE); } @@ -325,7 +329,8 @@ UNMAP_AFTER_INIT ErrorOr NVMeController::create_admin_queue(QueueType queu return maybe_error; } { - auto buffer = TRY(MM.allocate_dma_buffer_pages(cq_size, "Admin CQ queue"sv, Memory::Region::Access::ReadWrite, cq_dma_pages)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ auto buffer = TRY(MM.allocate_dma_buffer_pages(cq_size, "Admin CQ queue"sv, Memory::Region::Access::ReadWrite, cq_dma_pages, Memory::MemoryType::IO)); cq_dma_region = move(buffer); } @@ -334,7 +339,8 @@ UNMAP_AFTER_INIT ErrorOr NVMeController::create_admin_queue(QueueType queu memset(cq_dma_region->vaddr().as_ptr(), 0, cq_size); { - auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "Admin SQ queue"sv, Memory::Region::Access::ReadWrite, sq_dma_pages)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "Admin SQ queue"sv, Memory::Region::Access::ReadWrite, sq_dma_pages, Memory::MemoryType::IO)); sq_dma_region = move(buffer); } auto doorbell_regs = TRY(Memory::map_typed_writable(m_bar.offset(REG_SQ0TDBL_START))); @@ -373,7 +379,8 @@ UNMAP_AFTER_INIT ErrorOr NVMeController::create_io_queue(u8 qid, QueueType auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096); { - auto buffer = TRY(MM.allocate_dma_buffer_pages(cq_size, "IO CQ queue"sv, Memory::Region::Access::ReadWrite, cq_dma_pages)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto buffer = TRY(MM.allocate_dma_buffer_pages(cq_size, "IO CQ queue"sv, Memory::Region::Access::ReadWrite, cq_dma_pages, Memory::MemoryType::IO)); cq_dma_region = move(buffer); } @@ -382,7 +389,8 @@ UNMAP_AFTER_INIT ErrorOr NVMeController::create_io_queue(u8 qid, QueueType memset(cq_dma_region->vaddr().as_ptr(), 0, cq_size); { - auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "IO SQ queue"sv, Memory::Region::Access::ReadWrite, sq_dma_pages)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "IO SQ queue"sv, Memory::Region::Access::ReadWrite, sq_dma_pages, Memory::MemoryType::IO)); sq_dma_region = move(buffer); } diff --git a/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp b/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp index db4096fff4633a..75c3156ae9e891 100644 --- a/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp +++ b/Kernel/Devices/Storage/NVMe/NVMeQueue.cpp @@ -16,7 +16,8 @@ ErrorOr> NVMeQueue::try_create(NVMeController& devi { // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it) RefPtr rw_dma_page; - auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. + auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page, Memory::MemoryType::IO)); if (rw_dma_page.is_null()) return ENOMEM; diff --git a/Kernel/Devices/Storage/SD/SDHostController.cpp b/Kernel/Devices/Storage/SD/SDHostController.cpp index 59f548d3cfddad..07efb23aea6324 100644 --- a/Kernel/Devices/Storage/SD/SDHostController.cpp +++ b/Kernel/Devices/Storage/SD/SDHostController.cpp @@ -114,7 +114,8 @@ ErrorOr SDHostController::initialize() void SDHostController::try_enable_dma() { if (m_registers->capabilities.adma2) { - auto maybe_dma_buffer = MM.allocate_dma_buffer_pages(dma_region_size, "SDHC DMA Buffer"sv, Memory::Region::Access::ReadWrite); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ auto maybe_dma_buffer = MM.allocate_dma_buffer_pages(dma_region_size, "SDHC DMA Buffer"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO); if (maybe_dma_buffer.is_error()) { dmesgln("Could not allocate DMA pages for SDHC: {}", maybe_dma_buffer.error()); } else { diff --git a/Kernel/Net/Intel/E1000NetworkAdapter.cpp b/Kernel/Net/Intel/E1000NetworkAdapter.cpp index 2f685c37f2d73b..7282bb4b53c3a5 100644 --- a/Kernel/Net/Intel/E1000NetworkAdapter.cpp +++ b/Kernel/Net/Intel/E1000NetworkAdapter.cpp @@ -174,8 +174,9 @@ UNMAP_AFTER_INIT ErrorOr> E1000NetworkAdapter::cre auto rx_buffer_region = TRY(MM.allocate_contiguous_kernel_region(rx_buffer_size * number_of_rx_descriptors, "E1000 RX buffers"sv, Memory::Region::Access::ReadWrite)); auto tx_buffer_region = MM.allocate_contiguous_kernel_region(tx_buffer_size * number_of_tx_descriptors, "E1000 TX buffers"sv, Memory::Region::Access::ReadWrite).release_value(); - auto rx_descriptors = TRY(Memory::allocate_dma_region_as_typed_array(number_of_rx_descriptors, "E1000 RX Descriptors"sv, Memory::Region::Access::ReadWrite)); - auto tx_descriptors = TRY(Memory::allocate_dma_region_as_typed_array(number_of_tx_descriptors, "E1000 TX Descriptors"sv, Memory::Region::Access::ReadWrite)); + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ auto rx_descriptors = TRY(Memory::allocate_dma_region_as_typed_array(number_of_rx_descriptors, "E1000 RX Descriptors"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); + auto tx_descriptors = TRY(Memory::allocate_dma_region_as_typed_array(number_of_tx_descriptors, "E1000 TX Descriptors"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO)); return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) E1000NetworkAdapter(interface_name.representable_view(), pci_device_identifier, diff --git a/Kernel/Net/Realtek/RTL8168NetworkAdapter.cpp b/Kernel/Net/Realtek/RTL8168NetworkAdapter.cpp index 33532833ff2203..74cfba4dc7bfde 100644 --- a/Kernel/Net/Realtek/RTL8168NetworkAdapter.cpp +++ b/Kernel/Net/Realtek/RTL8168NetworkAdapter.cpp @@ -261,8 +261,9 @@ UNMAP_AFTER_INIT RTL8168NetworkAdapter::RTL8168NetworkAdapter(StringView interfa , PCI::Device(device_identifier) , IRQHandler(irq) , m_registers_io_window(move(registers_io_window)) - , m_rx_descriptors(Memory::allocate_dma_region_as_typed_array(number_of_rx_descriptors + 1, "RTL8168 RX"sv, Memory::Region::Access::ReadWrite).release_value_but_fixme_should_propagate_errors()) - , m_tx_descriptors(Memory::allocate_dma_region_as_typed_array(number_of_tx_descriptors + 1, "RTL8168 TX"sv, Memory::Region::Access::ReadWrite).release_value_but_fixme_should_propagate_errors()) + // FIXME: Synchronize DMA buffer accesses correctly and set the MemoryType to NonCacheable. 
+ , m_rx_descriptors(Memory::allocate_dma_region_as_typed_array(number_of_rx_descriptors + 1, "RTL8168 RX"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO).release_value_but_fixme_should_propagate_errors()) + , m_tx_descriptors(Memory::allocate_dma_region_as_typed_array(number_of_tx_descriptors + 1, "RTL8168 TX"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO).release_value_but_fixme_should_propagate_errors()) { dmesgln_pci(*this, "Found @ {}", device_identifier.address()); dmesgln_pci(*this, "I/O port base: {}", m_registers_io_window);