diff --git a/config.mak b/config.mak index ab9481853..9fffc6c86 100755 --- a/config.mak +++ b/config.mak @@ -3,6 +3,9 @@ # Create a DEBUG build of SGX-LKL DEBUG ?= false +# Use the packed ring implementation of the virtio implementation +PACKED_RING ?= false + # Turn on debug tracing for LKL LKL_DEBUG ?= false @@ -124,6 +127,10 @@ else CMAKE_BUILD_TYPE=Release endif +ifeq ($(PACKED_RING),true) + SGXLKL_CFLAGS_EXTRA += -DPACKED_RING +endif + # OpenEnclave OE_SUBMODULE := $(SGXLKL_ROOT)/openenclave OE_SDK_ROOT_DEFAULT := $(BUILD_DIR)/openenclave diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 90e767846..8435f791f 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -7,17 +7,45 @@ #include #include #include +#include #define min_len(a, b) (a < b ? a : b) +#ifdef PACKED_RING +bool packed_ring = true; +#else +bool packed_ring = false; +#endif + struct _virtio_req { + union { + struct { + struct virtq* q; + }split; + struct { + struct virtq_packed* q; + }packed; + }; struct virtio_req req; struct virtio_dev* dev; - struct virtq* q; + uint16_t idx; }; +/* + * packed_desc_is_avail: Check if the current descriptor + * the driver is expected to fill in is available + * q: pointer to a packed virtio queue + * desc: pointer to the vring descriptor + */ +static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc* desc) +{ + bool avail = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_AVAIL)); + bool used = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_USED)); + return avail != used && avail == q->driver_wrap_counter; +} + /* * vring_desc_at_avail_idx : get the pointer to vring descriptor * at given available index from virtio_queue @@ -33,30 +61,48 @@ static inline struct virtq_desc* vring_desc_at_avail_idx( } /* - * add_dev_buf_from_vring_desc: - * read data buffer address from vring descriptors into local buffers + * add_dev_buf_from_vring_desc_split: + * read data buffer address from split vring descriptors into local buffers + * req : local buffer + * vring_desc_split : virtio ring descriptor + */ +static void add_dev_buf_from_vring_desc_split( + struct virtio_req* req, + struct virtq_desc* vring_desc_split) +{ + struct iovec* buf = &req->buf[req->buf_count++]; + + buf->iov_base = (void*)(uintptr_t)(vring_desc_split->addr); + buf->iov_len = vring_desc_split->len; + + req->total_len += buf->iov_len; +} + +/* + * add_dev_buf_from_vring_desc_packed: + * read data buffer address from packed vring descriptors into local buffers * req : local buffer - * vring_desc : virtio ring descriptor + * vring_desc_packed : virtio ring descriptor */ -static void add_dev_buf_from_vring_desc( +static void add_dev_buf_from_vring_desc_packed( struct virtio_req* req, - struct virtq_desc* vring_desc) + struct virtq_packed_desc* vring_desc_packed) { struct iovec* buf = &req->buf[req->buf_count++]; - buf->iov_base = (void*)(uintptr_t)(vring_desc->addr); - buf->iov_len = vring_desc->len; + buf->iov_base = (void*)(uintptr_t)(vring_desc_packed->addr); + buf->iov_len = vring_desc_packed->len; req->total_len += buf->iov_len; } /* - * get_next_desc : get next vring rescriptor pointer + * get_next_desc : get next split vring descriptor pointer * q: Virtio queue * desc: current descriptor * idx : available ring index */ -static struct virtq_desc* get_next_desc( +static struct virtq_desc* get_next_desc_split( struct virtq* q, struct virtq_desc* desc, uint16_t* idx) @@ -77,13 +123,37 @@ static struct virtq_desc* get_next_desc( } /* - * virtio_add_used: update 
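/*
 * Editor's note: illustrative sketch, not part of the patch. It restates the
 * packed_desc_is_avail() rule above with self-contained definitions so the
 * flag/wrap-counter interaction can be compiled and exercised on its own.
 * The demo_* names and constants are hypothetical stand-ins for the
 * LKL_VRING_PACKED_DESC_F_* bit positions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PACKED_DESC_F_AVAIL 7  /* bit position, like LKL_VRING_PACKED_DESC_F_AVAIL */
#define DEMO_PACKED_DESC_F_USED 15  /* bit position, like LKL_VRING_PACKED_DESC_F_USED */

/* A descriptor is available to the device when its AVAIL bit differs from its
 * USED bit and AVAIL matches the wrap counter the driver is writing with. */
static bool demo_desc_is_avail(uint16_t flags, bool driver_wrap_counter)
{
    bool avail = !!(flags & (1 << DEMO_PACKED_DESC_F_AVAIL));
    bool used = !!(flags & (1 << DEMO_PACKED_DESC_F_USED));
    return avail != used && avail == driver_wrap_counter;
}

int main(void)
{
    uint16_t flags = 1 << DEMO_PACKED_DESC_F_AVAIL;       /* lap 1: AVAIL=1, USED=0   */
    printf("lap 1, fresh: %d\n", demo_desc_is_avail(flags, true));   /* 1 */
    flags |= 1 << DEMO_PACKED_DESC_F_USED;                /* device marks it used     */
    printf("lap 1, used:  %d\n", demo_desc_is_avail(flags, true));   /* 0 */
    flags &= ~(1 << DEMO_PACKED_DESC_F_AVAIL);            /* lap 2: driver flips to 0 */
    printf("lap 2, fresh: %d\n", demo_desc_is_avail(flags, false));  /* 1 */
    return 0;
}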
used ring at used index with used discriptor index + * get_next_desc : get next packed vring descriptor pointer + * q: Virtio queue + * desc: current descriptor + * idx : available ring index + */ +static struct virtq_packed_desc* get_next_desc_packed( + struct virtq_packed* q, + struct virtq_packed_desc* desc, + uint16_t* idx) +{ + if (q->max_merge_len) + { + if (++(*idx) == q->num_max) + return NULL; + struct virtq_packed_desc* next_desc = &q->desc[*idx & (q->num-1)]; + packed_desc_is_avail(q,next_desc) ? next_desc : NULL; + } + + if (!(desc->flags & LKL_VRING_DESC_F_NEXT)) + return NULL; + return &q->desc[++(*idx) & (q->num - 1)]; +} + +/* + * virtio_add_used_split: update used ring at used index with used discriptor index * q : input parameter * used_idx : input parameter * avail_idx: input parameter * len : input parameter */ -static inline void virtio_add_used( +static inline void virtio_add_used_split( struct virtq* q, uint16_t used_idx, uint16_t avail_idx, @@ -96,6 +166,31 @@ static inline void virtio_add_used( q->used->ring[used_idx].len = htole16(len); } +static inline void virtio_add_used_packed( + struct virtq_packed* q, + uint16_t used_idx, + uint32_t len, + uint16_t id) +{ + __sync_synchronize(); + struct virtq_packed_desc* desc = &q->desc[used_idx & (q->num -1)]; + desc->id = id; + desc->len = htole32(len); + if (q->device_wrap_counter == 1) + { + desc->flags |= 1 << LKL_VRING_PACKED_DESC_F_AVAIL | + 1 << LKL_VRING_PACKED_DESC_F_USED; + } + else + { + uint16_t avail_set_zero = 1 << LKL_VRING_PACKED_DESC_F_AVAIL; + uint16_t used_set_zero = 1 << LKL_VRING_PACKED_DESC_F_USED; + desc->flags &= + ~avail_set_zero & ~used_set_zero; + } + desc->flags = htole16(desc->flags); +} + /* * virtio_sync_used_idx: update used index * q: virtio queue @@ -148,13 +243,13 @@ static inline void virtio_deliver_irq(struct virtio_dev* dev) * req: local virtio request buffer * len: length of the data processed */ -void virtio_req_complete(struct virtio_req* req, uint32_t len) +static void virtio_req_complete_split(struct virtio_req* req, uint32_t len) { int send_irq = 0; struct _virtio_req* _req = container_of(req, struct _virtio_req, req); - struct virtq* q = _req->q; + struct virtq* q = _req->split.q; uint16_t avail_idx = _req->idx; - uint16_t used_idx = virtio_get_used_idx(_req->q); + uint16_t used_idx = virtio_get_used_idx(q); /* * We've potentially used up multiple (non-chained) descriptors and have @@ -169,7 +264,7 @@ void virtio_req_complete(struct virtio_req* req, uint32_t len) else used_len = min_len(len, req->buf[i].iov_len); - virtio_add_used(q, used_idx++, avail_idx++, used_len); + virtio_add_used_split(q, used_idx++, avail_idx++, used_len); len -= used_len; if (!len) @@ -226,18 +321,102 @@ void virtio_req_complete(struct virtio_req* req, uint32_t len) } /* - * virtio_process_one: Process one queue at a time + * virtio_req_complete: handle finishing activities after processing request + * req: local virtio request buffer + * len: length of the data processed + */ +static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) +{ + /** + * Requirements for this: + * Setting a single used desc for a descriptor chain + * Ensuring the id of a used desc for a desc chain is the id of the last buffer in the chain + * avail_desc_idx and used_desc_idx to be incremented and wrapped around as appropriate + * changing the wrap counters when the above are wrapped around + * + * This function only gets called either with chained descriptors, + * or max_merge_len (which I assume would also 
be chained descriptors). + */ + int send_irq = 0; + struct _virtio_req* _req = container_of(req, struct _virtio_req, req); + struct virtq_packed* q = _req->packed.q; + uint16_t avail_desc_idx = _req->idx; + uint16_t used_desc_idx = q->used_desc_idx; + uint16_t prev_used_desc_idx = used_desc_idx; + uint16_t last_buffer_idx = avail_desc_idx+(req->buf_count-1); + uint16_t used_len, event_idx; + + if (!q->max_merge_len) + used_len = len; + else + used_len = min_len(len, req->buf[req->buf_count-1].iov_len); + + struct virtq_packed_desc* desc = &q->desc[last_buffer_idx & (q->num -1)]; + virtio_add_used_packed(q, used_desc_idx, used_len, desc->id); + + used_desc_idx += req->buf_count; + avail_desc_idx += req->buf_count; + + if (used_desc_idx >= q->num) + { + used_desc_idx -= q->num; + q->device_wrap_counter = !q->device_wrap_counter; + } + + if (avail_desc_idx >= q->num) + { + avail_desc_idx -= q->num; + q->driver_wrap_counter = !q->driver_wrap_counter; + } + + q->used_desc_idx = used_desc_idx; + q->avail_desc_idx = avail_desc_idx; + + if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE) + send_irq = 1; + + else if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC) + { + event_idx = q->driver->off_wrap & ~(1 << LKL_VRING_PACKED_EVENT_F_WRAP_CTR); + //Check if event_idx has been set as used used + // old_used event new_used + // new_used old_used event + // new_used event old_used (X) + // event old_used new_used (X) + if ((used_desc_idx > event_idx && event_idx >= prev_used_desc_idx) || + (used_desc_idx < prev_used_desc_idx && prev_used_desc_idx <= event_idx) || + (used_desc_idx < prev_used_desc_idx && event_idx < used_desc_idx)) + send_irq = 1; + } + + if (send_irq) + virtio_deliver_irq(_req->dev); +} + +/* + * virtio_req_complete: handle finishing activities after processing request + * req: local virtio request buffer + * len: length of the data processed + */ +void virtio_req_complete(struct virtio_req* req, uint32_t len) +{ + packed_ring ? 
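/*
 * Editor's note: illustrative sketch, not part of the patch. The three range
 * comparisons above decide whether the new used index has moved past the
 * event index the driver advertised via off_wrap. The same "did we cross
 * event_idx" test can be written with 16-bit modular arithmetic, which is how
 * the virtio spec expresses it for the split ring (vring_need_event). The
 * demo_* name is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_crossed_event_idx(uint16_t event_idx, uint16_t new_used, uint16_t old_used)
{
    /* True when event_idx lies in the half-open window (old_used, new_used],
     * with wrap-around handled by the unsigned 16-bit subtraction. */
    return (uint16_t)(new_used - event_idx - 1) < (uint16_t)(new_used - old_used);
}

/* Example: old_used = 0xfffe, new_used = 0x0002 (wrapped), event_idx = 0x0000
 * -> crossed, so an interrupt should be sent. */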
virtio_req_complete_packed(req, len) : + virtio_req_complete_split(req, len); +} + +/* + * virtio_process_one: Process one split queue at a time * dev: device structure pointer * qidx: queue index to be processed */ -static int virtio_process_one(struct virtio_dev* dev, int qidx) +static int virtio_process_one_split(struct virtio_dev* dev, int qidx) { - struct virtq* q = &dev->queue[qidx]; + struct virtq* q = &dev->split.queue[qidx]; uint16_t idx = q->last_avail_idx; struct _virtio_req _req = { .dev = dev, - .q = q, + .split.q = q, .idx = idx, }; @@ -246,10 +425,41 @@ static int virtio_process_one(struct virtio_dev* dev, int qidx) struct virtq_desc* desc = vring_desc_at_avail_idx(q, idx); do { - add_dev_buf_from_vring_desc(req, desc); + add_dev_buf_from_vring_desc_split(req, desc); if (q->max_merge_len && req->total_len > q->max_merge_len) break; - desc = get_next_desc(q, desc, &idx); + desc = get_next_desc_split(q, desc, &idx); + } while (desc && req->buf_count < VIRTIO_REQ_MAX_BUFS); + + // Return result of enqueue operation + return dev->ops->enqueue(dev, qidx, req); +} + +/* + * virtio_process_one: Process one packed queue at a time + * dev: device structure pointer + * qidx: queue index to be processed + */ +static int virtio_process_one_packed(struct virtio_dev* dev, int qidx) +{ + struct virtq_packed* q = &dev->packed.queue[qidx]; + uint16_t idx = q->avail_desc_idx; + + struct _virtio_req _req = { + .dev = dev, + .packed.q = q, + .idx = idx, + }; + + struct virtio_req* req = &_req.req; + struct virtq_packed_desc* desc = &q->desc[idx & (q->num - 1)]; + do + { + add_dev_buf_from_vring_desc_packed(req, desc); + // Do we need this + if (q->max_merge_len && req->total_len > q->max_merge_len) + break; + desc = get_next_desc_packed(q, desc, &idx); } while (desc && req->buf_count < VIRTIO_REQ_MAX_BUFS); // Return result of enqueue operation @@ -261,20 +471,31 @@ static inline void virtio_set_avail_event(struct virtq* q, uint16_t val) *((uint16_t*)&q->used->ring[q->num]) = val; } +static void virtio_set_queue_max_merge_len_split(struct virtio_dev* dev, int q, int len) +{ + dev->split.queue[q].max_merge_len = len; +} + +static void virtio_set_queue_max_merge_len_packed(struct virtio_dev* dev, int q, int len) +{ + dev->packed.queue[q].max_merge_len = len; +} + void virtio_set_queue_max_merge_len(struct virtio_dev* dev, int q, int len) { - dev->queue[q].max_merge_len = len; + packed_ring ? 
virtio_set_queue_max_merge_len_packed(dev, q, len) : + virtio_set_queue_max_merge_len_split(dev, q, len); } /* - * virtio_process_queue : process all the requests in the specific queue + * virtio_process_queue : process all the requests in the specific split queue * dev: virtio device structure pointer * qidx: queue index to be processed * fd: disk file descriptor */ -void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) +static void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) { - struct virtq* q = &dev->queue[qidx]; + struct virtq* q = &dev->split.queue[qidx]; if (!q->ready) return; @@ -285,7 +506,7 @@ void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) while (q->last_avail_idx != q->avail->idx) { /* Make sure following loads happens after loading q->avail->idx */ - if (virtio_process_one(dev, qidx) < 0) + if (virtio_process_one_split(dev, qidx) < 0) break; if (q->last_avail_idx == le16toh(q->avail->idx)) virtio_set_avail_event(q, q->avail->idx); @@ -294,3 +515,50 @@ void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) if (dev->ops->release_queue) dev->ops->release_queue(dev, qidx); } + +/* + * virtio_process_queue : process all the requests in the specific packed queue + * dev: virtio device structure pointer + * qidx: queue index to be processed + * fd: disk file descriptor + */ +static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) +{ + struct virtq_packed* q = &dev->packed.queue[qidx]; + + if (!q->ready) + return; + + if (dev->ops->acquire_queue) + dev->ops->acquire_queue(dev, qidx); + + __sync_synchronize(); + q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_DISABLE; + + while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)])) + { + // Need to process desc here + // Possible make some process_one_packed + // Question is what else do I include in this statement + if (virtio_process_one_packed(dev, qidx) < 0) + break; + } + + __sync_synchronize(); + q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; + + if (dev->ops->release_queue) + dev->ops->release_queue(dev, qidx); +} + +/* + * virtio_process_queue : process all the requests in the specific queue + * dev: virtio device structure pointer + * qidx: queue index to be processed + * fd: disk file descriptor + */ +void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) +{ + packed_ring ? 
virtio_process_queue_packed(dev, qidx) : + virtio_process_queue_split(dev, qidx); +} \ No newline at end of file diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index e62c15db2..0d0e3144b 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -19,6 +19,7 @@ extern sgxlkl_host_state_t sgxlkl_host_state; #if DEBUG && VIRTIO_TEST_HOOK +#include static uint64_t virtio_blk_req_cnt; #endif // DEBUG && VIRTIO_TEST_HOOK @@ -49,6 +50,7 @@ static int blk_enqueue(struct virtio_dev* dev, int q, struct virtio_req* req) if (req->buf_count < 3) goto out; + h = req->buf[0].iov_base; t = req->buf[req->buf_count - 1].iov_base; @@ -109,10 +111,16 @@ int blk_device_init( size_t disk_index, int enable_swiotlb) { + void* vq_mem = NULL; struct virtio_blk_dev* host_blk_device = NULL; size_t bdev_size = sizeof(struct virtio_blk_dev); - size_t vq_size = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq); + size_t vq_size; + + if (!packed_ring) + vq_size = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq); + else + vq_size = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq_packed); /*Allocate memory for block device*/ bdev_size = next_pow2(bdev_size); @@ -140,10 +148,29 @@ int blk_device_init( } /* Initialize block device */ - host_blk_device->dev.queue = vq_mem; - memset(host_blk_device->dev.queue, 0, vq_size); + if (!packed_ring) + { + host_blk_device->dev.split.queue = vq_mem; + memset(host_blk_device->dev.split.queue, 0, vq_size); + } + else + { + host_blk_device->dev.packed.queue = vq_mem; + memset(host_blk_device->dev.packed.queue, 0, vq_size); + } for (int i = 0; i < HOST_BLK_DEV_NUM_QUEUES; i++) - host_blk_device->dev.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; + { + if (!packed_ring) + { + host_blk_device->dev.split.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; + } + else + { + host_blk_device->dev.packed.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; + host_blk_device->dev.packed.queue[i].device_wrap_counter = 1; + host_blk_device->dev.packed.queue[i].driver_wrap_counter = 1; + } + } host_blk_device->config.capacity = disk->size / 512; @@ -158,6 +185,9 @@ int blk_device_init( host_blk_device->dev.device_features |= BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_RING_F_EVENT_IDX); + if (packed_ring) + host_blk_device->dev.device_features |= BIT(VIRTIO_F_RING_PACKED); + if (enable_swiotlb) host_blk_device->dev.device_features |= BIT(VIRTIO_F_IOMMU_PLATFORM); diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index ccb15486d..20d4193d0 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -105,7 +105,9 @@ void* monitor_console_input(void* cons_dev) break; if (ret & DEV_CONSOLE_WRITE) + { virtio_process_queue(dev, RX_QUEUE_ID); + } } while (1); return NULL; } @@ -228,7 +230,13 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) void* console_vq_mem = NULL; size_t host_console_size = next_pow2(sizeof(struct virtio_console_dev)); - size_t console_vq_size = NUM_QUEUES * sizeof(struct virtq); + size_t console_vq_size; + + if (!packed_ring) + console_vq_size = NUM_QUEUES * sizeof(struct virtq); + else + console_vq_size = NUM_QUEUES * sizeof(struct virtq_packed); + console_vq_size = next_pow2(console_vq_size); /* Console host device configuration */ @@ -268,12 +276,31 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) _console_dev->out_console_fd = STDOUT_FILENO; struct virtio_dev* dev = &_console_dev->dev; - dev->queue = 
console_vq_mem; - memset(dev->queue, 0, console_vq_size); + if (!packed_ring) + { + dev->split.queue = console_vq_mem; + memset(dev->split.queue, 0, console_vq_size); + } + else + { + dev->packed.queue = console_vq_mem; + memset(dev->packed.queue, 0, console_vq_size); + } /* assign the queue depth to each virt queue */ for (int i = 0; i < NUM_QUEUES; i++) - dev->queue[i].num_max = QUEUE_DEPTH; + { + if (!packed_ring) + { + dev->split.queue[i].num_max = QUEUE_DEPTH; + } + else + { + dev->packed.queue[i].num_max = QUEUE_DEPTH; + dev->packed.queue[i].device_wrap_counter = 1; + dev->packed.queue[i].driver_wrap_counter = 1; + } + } /* set console device feature */ dev->device_id = VIRTIO_ID_CONSOLE; @@ -286,6 +313,9 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) if (host_state->enclave_config.mode != SW_DEBUG_MODE) dev->device_features |= BIT(VIRTIO_F_IOMMU_PLATFORM); + if (packed_ring) + dev->device_features |= BIT(VIRTIO_F_RING_PACKED); + dev->ops = &host_console_ops; _console_dev->qlocks = init_queue_locks(NUM_QUEUES); diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c index 467d7b766..c82980081 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -534,6 +534,7 @@ int netdev_init(sgxlkl_host_state_t* host_state) void* netdev_vq_mem = NULL; struct virtio_net_dev* net_dev = NULL; char mac[6]; + size_t netdev_vq_size; // Generate a completely random MAC address size_t b = 0; while (b < sizeof(mac)) { @@ -550,7 +551,11 @@ int netdev_init(sgxlkl_host_state_t* host_state) mac[0] &= 0xfe; size_t host_netdev_size = next_pow2(sizeof(struct virtio_net_dev)); - size_t netdev_vq_size = NUM_QUEUES * sizeof(struct virtq); + + if (!packed_ring) + netdev_vq_size = NUM_QUEUES * sizeof(struct virtq); + else + netdev_vq_size = NUM_QUEUES * sizeof(struct virtq_packed); netdev_vq_size = next_pow2(netdev_vq_size); if (!_netdev_id) @@ -589,12 +594,30 @@ int netdev_init(sgxlkl_host_state_t* host_state) return -1; } - net_dev->dev.queue = netdev_vq_mem; - memset(net_dev->dev.queue, 0, netdev_vq_size); - + if (!packed_ring) + { + net_dev->dev.split.queue = netdev_vq_mem; + memset(net_dev->dev.split.queue, 0, netdev_vq_size); + } + else + { + net_dev->dev.packed.queue = netdev_vq_mem; + memset(net_dev->dev.packed.queue, 0, netdev_vq_size); + } /* assign the queue depth to each virt queue */ for (int i = 0; i < NUM_QUEUES; i++) - net_dev->dev.queue[i].num_max = QUEUE_DEPTH; + { + if (!packed_ring) + { + net_dev->dev.split.queue[i].num_max = QUEUE_DEPTH; + } + else + { + net_dev->dev.packed.queue[i].num_max = QUEUE_DEPTH; + net_dev->dev.packed.queue[i].device_wrap_counter = 1; + net_dev->dev.packed.queue[i].driver_wrap_counter = 1; + } + } /* set net device feature */ net_dev->dev.device_id = VIRTIO_ID_NET; @@ -617,6 +640,10 @@ int netdev_init(sgxlkl_host_state_t* host_state) } net_dev->dev.device_features |= BIT(VIRTIO_NET_F_MAC); + + if (packed_ring) + net_dev->dev.device_features |= BIT(VIRTIO_F_RING_PACKED); + memcpy(net_dev->config.mac, mac, ETH_ALEN); net_dev->dev.config_data = &net_dev->config; @@ -629,7 +656,9 @@ int netdev_init(sgxlkl_host_state_t* host_state) * there are available up to 64KB in total len. 
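/*
 * Editor's note: illustrative sketch, not part of the patch. It condenses the
 * per-queue initialisation repeated above for the block, console and net
 * devices: the queue depth is advertised through num_max and, for packed
 * rings, both wrap counters start at 1 as the virtio 1.1 spec requires after
 * reset. The demo_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct demo_packed_queue
{
    uint32_t num_max;         /* queue depth advertised to the driver */
    bool device_wrap_counter; /* device-side wrap counter, 1 after reset */
    bool driver_wrap_counter; /* driver-side wrap counter, 1 after reset */
};

static void demo_init_packed_queues(struct demo_packed_queue* q, int nqueues, uint32_t depth)
{
    memset(q, 0, (size_t)nqueues * sizeof(*q));
    for (int i = 0; i < nqueues; i++)
    {
        q[i].num_max = depth;
        q[i].device_wrap_counter = true;
        q[i].driver_wrap_counter = true;
    }
}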
*/ if (net_dev->dev.device_features & BIT(VIRTIO_NET_F_MRG_RXBUF)) + { virtio_set_queue_max_merge_len(&net_dev->dev, RX_QUEUE_IDX, 65536); + } /* Register the netdev fd */ register_net_device(net_dev, host_state->net_fd); diff --git a/src/include/host/virtio_dev.h b/src/include/host/virtio_dev.h index 673578d58..39939a2bb 100644 --- a/src/include/host/virtio_dev.h +++ b/src/include/host/virtio_dev.h @@ -9,6 +9,9 @@ #define VIRTIO_F_VERSION_1 32 #define VIRTIO_RING_F_EVENT_IDX 29 #define VIRTIO_F_IOMMU_PLATFORM 33 +#define VIRTIO_F_RING_PACKED 34 + +extern bool packed_ring; struct virtio_dev; @@ -57,7 +60,15 @@ struct virtio_dev uint64_t driver_features; _Atomic(uint32_t) driver_features_sel; _Atomic(uint32_t) queue_sel; - struct virtq* queue; + union { + struct { + struct virtq* queue; + }split; + + struct { + struct virtq_packed* queue; + }packed; + }; uint32_t queue_notify; _Atomic(uint32_t) int_status; _Atomic(uint32_t) status; diff --git a/src/include/lkl/virtio.h b/src/include/lkl/virtio.h index 91f468c85..65e5832d8 100644 --- a/src/include/lkl/virtio.h +++ b/src/include/lkl/virtio.h @@ -16,7 +16,15 @@ struct virtio_dev uint64_t driver_features; _Atomic(uint32_t) driver_features_sel; _Atomic(uint32_t) queue_sel; - struct virtq* queue; + union { + struct { + struct virtq* queue; + }split; + + struct { + struct virtq_packed* queue; + }packed; + }; uint32_t queue_notify; _Atomic(uint32_t) int_status; _Atomic(uint32_t) status; @@ -35,6 +43,7 @@ struct virtio_dev */ int lkl_virtio_dev_setup( struct virtio_dev* dev, + struct virtio_dev* dev_host, int mmio_size, void* virtio_req_complete); @@ -44,4 +53,9 @@ int lkl_virtio_dev_setup( */ void lkl_virtio_deliver_irq(uint8_t dev_id); -#endif /* _LKL_LIB_VIRTIO_H */ +/* + * Function to allocate memory for a shadow virtio dev + */ +struct virtio_dev* alloc_shadow_virtio_dev(); + +#endif //_LKL_LIB_VIRTIO_H \ No newline at end of file diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index dba1449e8..17e922ef4 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -9,6 +9,28 @@ #define LKL_VRING_DESC_F_WRITE 2 /* This means the buffer contains a list of buffer descriptors. */ #define LKL_VRING_DESC_F_INDIRECT 4 +/* + * Mark a descriptor as available or used in packed ring. + * Notice: they are defined as shifts instead of shifted values. + */ +#define LKL_VRING_PACKED_DESC_F_AVAIL 7 +#define LKL_VRING_PACKED_DESC_F_USED 15 +/* Enable events in packed ring. */ +#define LKL_VRING_PACKED_EVENT_FLAG_ENABLE 0x0 +/* Disable events in packed ring. */ +#define LKL_VRING_PACKED_EVENT_FLAG_DISABLE 0x1 +/* + * Enable events for a specific descriptor in packed ring. + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter). + * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated. + */ +#define LKL_VRING_PACKED_EVENT_FLAG_DESC 0x2 +/* + * Wrap counter bit shift in event suppression structure + * of packed ring. + */ +#define LKL_VRING_PACKED_EVENT_F_WRAP_CTR 15 + struct virtq_desc { @@ -58,4 +80,39 @@ struct virtq uint16_t last_used_idx_signaled; }; +struct virtq_packed_desc +{ + /* Address (guest-physical). */ + uint64_t addr; + /* Length. */ + uint32_t len; + /* Buffer ID. */ + uint16_t id; + /* The flags as indicated above. */ + uint16_t flags; +}; + +struct virtq_packed_desc_event +{ + /* Descriptor Ring Change Event Offset/Wrap Counter. */ + uint16_t off_wrap; + /* Descriptor Ring Change Event Flags. 
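/*
 * Editor's note: illustrative sketch, not part of the patch. VIRTIO_F_RING_PACKED
 * is feature bit 34 and therefore sits in the upper half of the 64-bit feature
 * word; the BIT() macro and virtio_has_feature() helper used elsewhere in this
 * patch reduce to the checks below. The demo_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_BIT(x) (1ULL << (x))
#define DEMO_VIRTIO_F_VERSION_1 32
#define DEMO_VIRTIO_F_RING_PACKED 34

static bool demo_has_feature(uint64_t device_features, unsigned int bit)
{
    return (device_features & DEMO_BIT(bit)) != 0;
}

/* The guest should only drive a packed ring when the device offered it
 * (and the modern interface is in use); otherwise it falls back to split. */
static bool demo_use_packed_ring(uint64_t device_features)
{
    return demo_has_feature(device_features, DEMO_VIRTIO_F_VERSION_1) &&
           demo_has_feature(device_features, DEMO_VIRTIO_F_RING_PACKED);
}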
*/ + uint16_t flags; +}; + +struct virtq_packed +{ + uint32_t num_max; + _Atomic(uint32_t) ready; + _Atomic(uint32_t) num; + uint32_t max_merge_len; + + _Atomic(struct virtq_packed_desc*) desc; + _Atomic(struct virtq_packed_desc_event*) driver; + _Atomic(struct virtq_packed_desc_event*) device; + bool device_wrap_counter; + bool driver_wrap_counter; + uint16_t avail_desc_idx; + uint16_t used_desc_idx; +}; #endif diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index fe9c99f3a..1b57e4bab 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -5,16 +5,20 @@ #include #include #include +#include #include #include +#include #include #include +#include #include +#include #include #include "enclave/vio_enclave_event_channel.h" #include #include - +#include #include "openenclave/corelibc/oestring.h" // from inttypes.h @@ -23,9 +27,28 @@ #define VIRTIO_DEV_MAGIC 0x74726976 #define VIRTIO_DEV_VERSION 2 +#define BLK_DEV_NUM_QUEUES 1 +#define NET_DEV_NUM_QUEUES 2 +#define CONSOLE_NUM_QUEUES 2 + +#define BLK_DEV_QUEUE_DEPTH 32 +#define CONSOLE_QUEUE_DEPTH 32 +#define NET_DEV_QUEUE_DEPTH 128 + #undef BIT #define BIT(x) (1ULL << x) +#ifdef PACKED_RING +bool packed_ring = true; +#else +bool packed_ring = false; +#endif + +#ifdef DEBUG +#include +#endif + + /* Used for notifying LKL for the list of virtio devices at bootup. * Currently block, network and console devices are passed */ char lkl_virtio_devs[4096]; @@ -42,6 +65,18 @@ static uint32_t lkl_num_virtio_boot_devs; typedef void (*lkl_virtio_dev_deliver_irq)(uint64_t dev_id); static lkl_virtio_dev_deliver_irq virtio_deliver_irq[DEVICE_COUNT]; +static struct virtio_dev* dev_hosts[DEVICE_COUNT]; + +/* + * Used for switching between the host and shadow dev structure based + * on the virtio_read/write request + */ +struct virtio_dev_handle +{ + struct virtio_dev* dev; //shadow structure in guest memory + struct virtio_dev* dev_host; +}; + /* * virtio_read_device_features: Read Device Features * dev : pointer to device structure @@ -55,6 +90,17 @@ static inline uint32_t virtio_read_device_features(struct virtio_dev* dev) return (uint32_t)dev->device_features; } +/* + * virtio_has_feature: Return whether feature bit has been set on virtio device + * dev: pointer to device structure + * bit: feature bit + * return whether feature bit is set + */ +static bool virtio_has_feature(struct virtio_dev* dev, unsigned int bit) +{ + return dev->device_features & BIT(bit); +} + /* * virtio_read: Process read requests from virtio_mmio * data: virtio_dev pointer @@ -85,7 +131,9 @@ static int virtio_read(void* data, int offset, void* res, int size) * shadow structure init routine copy the content from the host structure. */ uint32_t val = 0; - struct virtio_dev* dev = (struct virtio_dev*)data; + struct virtio_dev_handle* dev_handle = (struct virtio_dev_handle*)data; + struct virtio_dev* dev = dev_handle->dev; + struct virtio_dev* dev_host = dev_handle->dev_host; if (offset >= VIRTIO_MMIO_CONFIG) { @@ -123,18 +171,24 @@ static int virtio_read(void* data, int offset, void* res, int size) * host-write-once */ case VIRTIO_MMIO_QUEUE_NUM_MAX: - val = dev->queue[dev->queue_sel].num_max; + val = packed_ring ? dev->packed.queue[dev->queue_sel].num_max : + dev->split.queue[dev->queue_sel].num_max; break; case VIRTIO_MMIO_QUEUE_READY: - val = dev->queue[dev->queue_sel].ready; + val = packed_ring ? 
dev->packed.queue[dev->queue_sel].ready : + dev->split.queue[dev->queue_sel].ready; break; /* Security Review: dev->int_status is host-read-write */ case VIRTIO_MMIO_INTERRUPT_STATUS: - val = dev->int_status; + val = dev_host->int_status; + if (dev->int_status != val) + dev->int_status = val; break; /* Security Review: dev->status is host-read-write */ case VIRTIO_MMIO_STATUS: - val = dev->status; + val = dev_host->status; + if (dev->status != val) + dev->status = val; break; /* Security Review: dev->config_gen should be host-write-once */ case VIRTIO_MMIO_CONFIG_GENERATION: @@ -257,9 +311,12 @@ static int virtio_write(void* data, int offset, void* res, int size) * perform copy-through write (write to shadow structure & to host * structure). virtq desc and avail ring address handling is a special case. */ - struct virtio_dev* dev = (struct virtio_dev*)data; - /* Security Review: dev->queue_sel should be host-read-only */ - struct virtq* q = &dev->queue[dev->queue_sel]; + struct virtio_dev_handle* dev_handle = (struct virtio_dev_handle*)data; + struct virtio_dev* dev = dev_handle->dev; + struct virtio_dev* dev_host = dev_handle->dev_host; + + struct virtq* split_q = packed_ring ? NULL : &dev_host->split.queue[dev->queue_sel]; + struct virtq_packed* packed_q = packed_ring ? &dev_host->packed.queue[dev->queue_sel] : NULL; uint32_t val; int ret = 0; @@ -288,31 +345,53 @@ static int virtio_write(void* data, int offset, void* res, int size) if (val > 1) return -LKL_EINVAL; dev->device_features_sel = val; + dev_host->device_features_sel = val; break; /* Security Review: dev->driver_features_sel should be host-read-only */ case VIRTIO_MMIO_DRIVER_FEATURES_SEL: if (val > 1) return -LKL_EINVAL; dev->driver_features_sel = val; + dev_host->driver_features_sel = val; break; /* Security Review: dev->driver_features should be host-read-only */ case VIRTIO_MMIO_DRIVER_FEATURES: virtio_write_driver_features(dev, val); + virtio_write_driver_features(dev_host, val); break; /* Security Review: dev->queue_sel should be host-read-only */ case VIRTIO_MMIO_QUEUE_SEL: dev->queue_sel = val; + dev_host->queue_sel = val; break; /* Security Review: dev->queue[dev->queue_sel].num should be * host-read-only */ case VIRTIO_MMIO_QUEUE_NUM: - dev->queue[dev->queue_sel].num = val; + if (packed_ring) + { + dev->packed.queue[dev->queue_sel].num = val; + dev_host->packed.queue[dev->queue_sel].num = val; + } + else + { + dev->split.queue[dev->queue_sel].num = val; + dev_host->split.queue[dev->queue_sel].num = val; + } break; /* Security Review: is dev->queue[dev->queue_sel].ready host-read-only? */ case VIRTIO_MMIO_QUEUE_READY: - dev->queue[dev->queue_sel].ready = val; + if (packed_ring) + { + dev->packed.queue[dev->queue_sel].ready = val; + dev_host->packed.queue[dev->queue_sel].ready = val; + } + else + { + dev->split.queue[dev->queue_sel].ready = val; + dev_host->split.queue[dev->queue_sel].ready = val; + } break; /* Security Review: guest virtio driver(s) writes to virtq desc ring and * avail ring in guest memory. In queue notify flow, we need to copy the @@ -324,10 +403,12 @@ static int virtio_write(void* data, int offset, void* res, int size) /* Security Review: dev->int_status is host-read-write */ case VIRTIO_MMIO_INTERRUPT_ACK: dev->int_status = 0; + dev_host->int_status = 0; break; /* Security Review: dev->status is host-read-write */ case VIRTIO_MMIO_STATUS: set_status(dev, val); + set_status(dev_host, val); break; /* Security Review: For Split Queue, q->desc link list * content should be host-read-only. 
The Split Queue implementaiton @@ -346,10 +427,16 @@ static int virtio_write(void* data, int offset, void* res, int size) * be required. */ case VIRTIO_MMIO_QUEUE_DESC_LOW: - set_ptr_low((_Atomic(uint64_t)*)&q->desc, val); + if (packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&packed_q->desc, val); + else + set_ptr_low((_Atomic(uint64_t)*)&split_q->desc, val); break; case VIRTIO_MMIO_QUEUE_DESC_HIGH: - set_ptr_high((_Atomic(uint64_t)*)&q->desc, val); + if (packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&packed_q->desc, val); + else + set_ptr_high((_Atomic(uint64_t)*)&split_q->desc, val); break; /* Security Review: For Split Queue, q->avail link list content should be * host-read-only. The Split Queue implementaiton @@ -364,10 +451,16 @@ static int virtio_write(void* data, int offset, void* res, int size) * to it. */ case VIRTIO_MMIO_QUEUE_AVAIL_LOW: - set_ptr_low((_Atomic(uint64_t)*)&q->avail, val); + if (packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&packed_q->driver, val); + else + set_ptr_low((_Atomic(uint64_t)*)&split_q->avail, val); break; case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: - set_ptr_high((_Atomic(uint64_t)*)&q->avail, val); + if (packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&packed_q->driver, val); + else + set_ptr_high((_Atomic(uint64_t)*)&split_q->avail, val); break; /* Security Review: For Split Queue, q->used link list content should be * guest-read-only. The Split Queue implementaiton in guest side virtio @@ -382,10 +475,16 @@ static int virtio_write(void* data, int offset, void* res, int size) * functionality. */ case VIRTIO_MMIO_QUEUE_USED_LOW: - set_ptr_low((_Atomic(uint64_t)*)&q->used, val); + if (packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&packed_q->device, val); + else + set_ptr_low((_Atomic(uint64_t)*)&split_q->used, val); break; case VIRTIO_MMIO_QUEUE_USED_HIGH: - set_ptr_high((_Atomic(uint64_t)*)&q->used, val); + if (packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&packed_q->device, val); + else + set_ptr_high((_Atomic(uint64_t)*)&split_q->used, val); break; default: ret = -1; @@ -399,41 +498,201 @@ static const struct lkl_iomem_ops virtio_ops = { .write = virtio_write, }; +static int device_num_queues(int device_id) +{ + switch(device_id) + { + case VIRTIO_ID_NET: + return NET_DEV_NUM_QUEUES; + case VIRTIO_ID_CONSOLE: + return CONSOLE_NUM_QUEUES; + case VIRTIO_ID_BLOCK: + return BLK_DEV_NUM_QUEUES; + default: + return 0; + } +} + /* * lkl_virtio_deliver_irq : Deliver the irq request to device task * dev_id : Device id for which irq needs to be delivered. */ void lkl_virtio_deliver_irq(uint8_t dev_id) { + // Get sgxlkl_enclave_state if (virtio_deliver_irq[dev_id]) virtio_deliver_irq[dev_id](dev_id); } +static void* copy_queue(struct virtio_dev* dev) +{ + void* vq_mem = NULL; + struct virtq_packed* dest_packed = NULL; + struct virtq* dest_split = NULL; + size_t vq_size = 0; + int num_queues = device_num_queues(dev->device_id); + + if (packed_ring) + { + vq_size = next_pow2(num_queues * sizeof(struct virtq_packed)); + } + else + { + vq_size = next_pow2(num_queues * sizeof(struct virtq)); + } + + vq_mem = sgxlkl_host_ops.mem_alloc(vq_size); + + if (!vq_mem) + { + sgxlkl_error("Queue mem alloc failed\n"); + return NULL; + } + + if (packed_ring) + dest_packed = vq_mem; + else + dest_split = vq_mem; + + for (int i = 0; i < num_queues; i++) + { + if (packed_ring) + { + dest_packed[i].num_max = dev->packed.queue[i].num_max; + } + + else + { + dest_split[i].num_max = dev->split.queue[i].num_max; + } + } + return packed_ring ? 
(void *) dest_packed : (void *) dest_split; +} + +static bool virtqueues_in_shared_memory(struct virtio_dev* dev) +{ + int num_queues = device_num_queues(dev->device_id); + + for (int i = 0; i < num_queues; i++) + { + if (packed_ring) + { + if((oe_is_within_enclave(&dev->packed.queue[i], sizeof(struct virtq_packed)))) + return false; + } + + else + { + if((oe_is_within_enclave(&dev->split.queue[i], sizeof(struct virtq)))) + return false; + } + } + + return true; +} + +static bool supported_device(struct virtio_dev* dev) +{ + return dev->device_id == VIRTIO_ID_NET || + dev->device_id == VIRTIO_ID_CONSOLE || + dev->device_id == VIRTIO_ID_BLOCK; +} + /* * Function to setup the virtio device setting */ int lkl_virtio_dev_setup( struct virtio_dev* dev, + struct virtio_dev* dev_host, int mmio_size, void* deliver_irq_cb) { + struct virtio_dev_handle* dev_handle; int avail = 0, num_bytes = 0, ret = 0; + size_t dev_handle_size = next_pow2(sizeof(struct virtio_dev_handle)); + dev_handle = sgxlkl_host_ops.mem_alloc(dev_handle_size); + + if (!dev_handle) + { + sgxlkl_error("Failed to allocate memory for dev handle\n"); + return -1; + } + + dev_handle->dev = dev; + dev_handle->dev_host = dev_host; + + dev->device_id = dev_host->device_id; + dev->vendor_id = dev_host->vendor_id; + dev->config_gen = dev_host->config_gen; + dev->device_features = dev_host->device_features; + dev->config_len = dev_host->config_len; + dev->int_status = dev_host->int_status; + + if (!supported_device(dev)) + { + sgxlkl_error("Unsupported device, device id: %d\n", dev->device_id); + return -1; + } + + if (dev->config_len != 0) + { + dev->config_data = sgxlkl_host_ops.mem_alloc(next_pow2(dev->config_len)); + if (!dev->config_data) + { + sgxlkl_error("Failed to allocate memory for dev config data\n"); + return -1; + } + memcpy(dev->config_data, dev_host->config_data, dev->config_len); + } + + if (packed_ring) + { + dev->packed.queue = copy_queue(dev_host); + if (!dev->packed.queue) + { + sgxlkl_error("Failed to copy packed virtqueue into shadow structure\n"); + return -1; + } + } + else + { + dev->split.queue = copy_queue(dev_host); + if (!dev->split.queue) + { + sgxlkl_error("Failed to copy split virtqueue into shadow structure\n"); + return -1; + } + } + + if (!virtqueues_in_shared_memory(dev_host)) + { + sgxlkl_error("Virtqueue arrays not in shared memory\n"); + return -1; + } + dev->irq = lkl_get_free_irq("virtio"); + dev_host->irq = dev->irq; + dev_host->int_status = 0; dev->int_status = 0; + if (dev->irq < 0) return 1; - /* Security Review: dev-vendor_id might cause overflow in - * virtio_deliver_irq[DEVICE_COUNT] - */ + if (packed_ring && !virtio_has_feature(dev, VIRTIO_F_RING_PACKED)) + { + sgxlkl_error("Device %d does not support virtio packed ring\n", dev->device_id); + return -1; + } + + if (dev->vendor_id >= DEVICE_COUNT) + { + sgxlkl_error("Too many devices. Only %d devices are supported\n", DEVICE_COUNT); + return -1; + } + virtio_deliver_irq[dev->vendor_id] = deliver_irq_cb; - /* Security Review: pass handle instead of virtio_dev pointer to the rest of - * the system. - */ - /* Security Review: shadow dev->base used in guest side only. 
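/*
 * Editor's note: illustrative sketch, not part of the patch. It shows the
 * shadow-device idea behind virtio_dev_handle and lkl_virtio_dev_setup() in
 * miniature: host-write-once fields are copied into enclave-local memory
 * once, reads are served from the shadow, and host-read-write fields are
 * written copy-through to both structures. The demo_* names are hypothetical.
 */
#include <stdint.h>

struct demo_dev
{
    uint32_t device_id;       /* host-write-once */
    uint64_t device_features; /* host-write-once */
    uint32_t status;          /* host-read-write */
};

struct demo_dev_handle
{
    struct demo_dev* dev;      /* shadow copy in guest/enclave memory */
    struct demo_dev* dev_host; /* shared structure owned by the host */
};

static void demo_shadow_init(struct demo_dev_handle* h)
{
    /* Copy host-write-once fields into the shadow exactly once at setup. */
    h->dev->device_id = h->dev_host->device_id;
    h->dev->device_features = h->dev_host->device_features;
}

static void demo_write_status(struct demo_dev_handle* h, uint32_t val)
{
    /* Copy-through write: the shadow stays coherent with the host view. */
    h->dev->status = val;
    h->dev_host->status = val;
}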
No - * copy-through to the host side structure - */ - dev->base = register_iomem(dev, mmio_size, &virtio_ops); + dev_hosts[dev->vendor_id] = dev_host; + dev->base = register_iomem(dev_handle, mmio_size, &virtio_ops); if (!lkl_is_running()) { @@ -473,3 +732,20 @@ int lkl_virtio_dev_setup( } return 0; } + +/* + * Function to allocate memory for a shadow virtio dev + */ +struct virtio_dev* alloc_shadow_virtio_dev() +{ + size_t dev_size = next_pow2(sizeof(struct virtio_dev)); + + struct virtio_dev* dev = sgxlkl_host_ops.mem_alloc(dev_size); + + if (!dev) + { + sgxlkl_error("Shadow device alloc failed\n"); + return NULL; + } + return dev; +} \ No newline at end of file diff --git a/src/lkl/virtio_blkdev.c b/src/lkl/virtio_blkdev.c index c98ed0517..54fb1ca94 100644 --- a/src/lkl/virtio_blkdev.c +++ b/src/lkl/virtio_blkdev.c @@ -8,14 +8,34 @@ #include "enclave/vio_enclave_event_channel.h" #include "lkl/virtio.h" +#define MAX_BLOCK_DEVS 32 + +static uint8_t registered_shadow_dev_idx = 0; + +static struct virtio_dev* registered_shadow_devs[MAX_BLOCK_DEVS]; + +/* + * Function to get shadow blkdev instance to use its attributes + */ +static inline struct virtio_dev* get_blkdev_instance(uint8_t blkdev_id) +{ + for (size_t i = 0; i < registered_shadow_dev_idx; i++) + if (registered_shadow_devs[i]->vendor_id == blkdev_id) + return registered_shadow_devs[i]; + SGXLKL_ASSERT(false); +} + /* * Function to trigger block dev irq to notify front end driver */ static void lkl_deliver_irq(uint8_t dev_id) { - struct virtio_dev* dev = + struct virtio_dev* dev_host = sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem[dev_id]; + struct virtio_dev* dev = get_blkdev_instance(dev_id); + + dev_host->int_status |= VIRTIO_MMIO_INT_VRING; dev->int_status |= VIRTIO_MMIO_INT_VRING; lkl_trigger_irq(dev->irq); @@ -29,20 +49,35 @@ int lkl_add_disks( const sgxlkl_enclave_mount_config_t* mounts, size_t num_mounts) { - struct virtio_dev* root_dev = + struct virtio_dev* root_dev = alloc_shadow_virtio_dev(); + if (!root_dev) + return -1; + + struct virtio_dev* root_dev_host = sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem - [sgxlkl_enclave_state.disk_state[0].host_disk_index]; - int mmio_size = VIRTIO_MMIO_CONFIG + root_dev->config_len; - if (lkl_virtio_dev_setup(root_dev, mmio_size, lkl_deliver_irq) != 0) + [sgxlkl_enclave_state.disk_state[0].host_disk_index]; + + int mmio_size = VIRTIO_MMIO_CONFIG + root_dev_host->config_len; + + registered_shadow_devs[registered_shadow_dev_idx++] = root_dev; + + if (lkl_virtio_dev_setup(root_dev, root_dev_host, mmio_size, lkl_deliver_irq) != 0) return -1; for (size_t i = 0; i < num_mounts; ++i) { - struct virtio_dev* dev = + struct virtio_dev* dev = alloc_shadow_virtio_dev(); + if (!dev) + return -1; + + struct virtio_dev* dev_host = sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem [sgxlkl_enclave_state.disk_state[i + 1].host_disk_index]; - int mmio_size = VIRTIO_MMIO_CONFIG + dev->config_len; - if (lkl_virtio_dev_setup(dev, mmio_size, lkl_deliver_irq) != 0) + + int mmio_size = VIRTIO_MMIO_CONFIG + dev_host->config_len; + registered_shadow_devs[registered_shadow_dev_idx++] = dev; + + if (lkl_virtio_dev_setup(dev, dev_host, mmio_size, lkl_deliver_irq) != 0) return -1; } return 0; diff --git a/src/lkl/virtio_console.c b/src/lkl/virtio_console.c index 92c2971f1..d595648d9 100644 --- a/src/lkl/virtio_console.c +++ b/src/lkl/virtio_console.c @@ -9,29 +9,35 @@ #include "enclave/ticketlock.h" #include "lkl/virtio.h" +static struct virtio_dev* console; + /* * Function to generate an 
interrupt for LKL kernel to reap the virtQ data */ static void lkl_deliver_irq(uint64_t dev_id) { - struct virtio_dev* dev = + struct virtio_dev* dev_host = sgxlkl_enclave_state.shared_memory.virtio_console_mem; - dev->int_status |= VIRTIO_MMIO_INT_VRING; + dev_host->int_status |= VIRTIO_MMIO_INT_VRING; + console->int_status |= VIRTIO_MMIO_INT_VRING; - lkl_trigger_irq(dev->irq); + lkl_trigger_irq(console->irq); } /* * Function to add a new net device to LKL */ -int lkl_virtio_console_add(struct virtio_dev* console) +int lkl_virtio_console_add(struct virtio_dev* console_host) { int ret = -1; + console = alloc_shadow_virtio_dev(); - int mmio_size = VIRTIO_MMIO_CONFIG + console->config_len; + if (!console) + return -1; - ret = lkl_virtio_dev_setup(console, mmio_size, &lkl_deliver_irq); + int mmio_size = VIRTIO_MMIO_CONFIG + console_host->config_len; + ret = lkl_virtio_dev_setup(console, console_host, mmio_size, &lkl_deliver_irq); return ret; } diff --git a/src/lkl/virtio_netdev.c b/src/lkl/virtio_netdev.c index 45934e498..abef5cf63 100644 --- a/src/lkl/virtio_netdev.c +++ b/src/lkl/virtio_netdev.c @@ -11,18 +11,23 @@ #define MAX_NET_DEVS 16 -static uint8_t registered_dev_idx = 0; +struct dev_handle +{ + struct virtio_dev* dev_host; + struct virtio_dev* dev; +}; -struct virtio_dev* registered_devs[MAX_NET_DEVS]; +struct dev_handle devs[MAX_NET_DEVS]; +static uint8_t registered_dev_idx = 0; /* * Function to get netdev instance to use its attributes */ -static inline struct virtio_dev* get_netdev_instance(uint8_t netdev_id) +static inline struct dev_handle* get_netdev_instance(uint8_t netdev_id) { for (size_t i = 0; i < registered_dev_idx; i++) - if (registered_devs[i]->vendor_id == netdev_id) - return registered_devs[i]; + if (devs[i].dev->vendor_id == netdev_id) + return &devs[i]; SGXLKL_ASSERT(false); } @@ -38,11 +43,6 @@ static int dev_register(struct virtio_dev* dev) sgxlkl_info("Too many virtio_net devices!\n"); ret = -LKL_ENOMEM; } - else - { - /* registered_dev_idx is incremented by the caller */ - registered_devs[registered_dev_idx] = dev; - } return ret; } @@ -51,24 +51,31 @@ static int dev_register(struct virtio_dev* dev) */ static void lkl_deliver_irq(uint64_t dev_id) { - struct virtio_dev* dev = get_netdev_instance(dev_id); + struct dev_handle* dev_pair = get_netdev_instance(dev_id); - dev->int_status |= VIRTIO_MMIO_INT_VRING; + dev_pair->dev->int_status |= VIRTIO_MMIO_INT_VRING; + dev_pair->dev_host->int_status |= VIRTIO_MMIO_INT_VRING; - lkl_trigger_irq(dev->irq); + lkl_trigger_irq(dev_pair->dev->irq); } /* * Function to add a new net device to LKL and register the cb to notify * frontend driver for the request completion. */ -int lkl_virtio_netdev_add(struct virtio_dev* netdev) +int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) { int ret = -1; - int mmio_size = VIRTIO_MMIO_CONFIG + netdev->config_len; + int mmio_size = VIRTIO_MMIO_CONFIG + netdev_host->config_len; + struct virtio_dev* netdev = alloc_shadow_virtio_dev(); + + if (!netdev) + return -1; + + devs[registered_dev_idx].dev_host = netdev_host; + devs[registered_dev_idx].dev = netdev; - registered_devs[registered_dev_idx] = netdev; - if (lkl_virtio_dev_setup(netdev, mmio_size, &lkl_deliver_irq) != 0) + if (lkl_virtio_dev_setup(netdev, netdev_host, mmio_size, &lkl_deliver_irq) != 0) return -1; ret = dev_register(netdev);
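/*
 * Editor's note: illustrative sketch, not part of the patch. The lkl_deliver_irq()
 * variants for the block, console and net devices above all follow the same
 * pattern once a shadow device exists: mark the "used buffer" interrupt pending
 * in both the host and the shadow copy, then raise the irq allocated for the
 * shadow. VIRTIO_MMIO_INT_VRING is the standard virtio-mmio bit 0 (0x1); the
 * demo_* names are hypothetical and demo_trigger_irq() stands in for
 * lkl_trigger_irq().
 */
#include <stdint.h>

#define DEMO_VIRTIO_MMIO_INT_VRING 0x1

struct demo_virtio_dev
{
    uint32_t int_status;
    int irq;
};

static void demo_trigger_irq(int irq)
{
    (void)irq; /* stand-in for lkl_trigger_irq() */
}

static void demo_deliver_irq(struct demo_virtio_dev* dev, struct demo_virtio_dev* dev_host)
{
    dev_host->int_status |= DEMO_VIRTIO_MMIO_INT_VRING;
    dev->int_status |= DEMO_VIRTIO_MMIO_INT_VRING;
    demo_trigger_irq(dev->irq);
}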