1 /*
2 * Copyright (c) 2014-2015 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <dev/virtio.h>
24 #include <dev/virtio/virtio_ring.h>
25
26 #include <debug.h>
27 #include <assert.h>
28 #include <trace.h>
29 #include <compiler.h>
30 #include <list.h>
31 #include <err.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <pow2.h>
35 #include <lk/init.h>
36 #include <kernel/thread.h>
37 #include <kernel/vm.h>
38 #include <platform/interrupts.h>
39
40 #include "virtio_priv.h"
41
42 #if WITH_DEV_VIRTIO_BLOCK
43 #include <dev/virtio/block.h>
44 #endif
45 #if WITH_DEV_VIRTIO_NET
46 #include <dev/virtio/net.h>
47 #endif
48 #if WITH_DEV_VIRTIO_GPU
49 #include <dev/virtio/gpu.h>
50 #endif
51
52 #define LOCAL_TRACE 0
53
54 static struct virtio_device *devices;
55
dump_mmio_config(const volatile struct virtio_mmio_config * mmio)56 static void dump_mmio_config(const volatile struct virtio_mmio_config *mmio)
57 {
58 printf("mmio at %p\n", mmio);
59 printf("\tmagic 0x%x\n", mmio->magic);
60 printf("\tversion 0x%x\n", mmio->version);
61 printf("\tdevice_id 0x%x\n", mmio->device_id);
62 printf("\tvendor_id 0x%x\n", mmio->vendor_id);
63 printf("\thost_features 0x%x\n", mmio->host_features);
64 printf("\tguest_page_size %u\n", mmio->guest_page_size);
65 printf("\tqnum %u\n", mmio->queue_num);
66 printf("\tqnum_max %u\n", mmio->queue_num_max);
67 printf("\tqnum_align %u\n", mmio->queue_align);
68 printf("\tqnum_pfn %u\n", mmio->queue_pfn);
69 printf("\tstatus 0x%x\n", mmio->status);
70 }
71
virtio_dump_desc(const struct vring_desc * desc)72 void virtio_dump_desc(const struct vring_desc *desc)
73 {
74 printf("vring descriptor %p\n", desc);
75 printf("\taddr 0x%" PRIx64 "\n", desc->addr);
76 printf("\tlen 0x%x\n", desc->len);
77 printf("\tflags 0x%" PRIx16 "\n", desc->flags);
78 printf("\tnext 0x%" PRIx16 "\n", desc->next);
79 }
80
/* Interrupt handler shared by all virtio-mmio devices.
 * 'arg' is the struct virtio_device registered with register_int_handler()
 * in virtio_mmio_detect(). Returns INT_RESCHEDULE if any driver callback
 * requested a reschedule, INT_NO_RESCHEDULE otherwise. */
static enum handler_return virtio_mmio_irq(void *arg)
{
    struct virtio_device *dev = (struct virtio_device *)arg;
    LTRACEF("dev %p, index %u\n", dev, dev->index);

    uint32_t irq_status = dev->mmio_config->interrupt_status;
    LTRACEF("status 0x%x\n", irq_status);

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (irq_status & 0x1) { /* used ring update */
        // XXX is this safe?
        /* ack the used-ring interrupt before scanning the rings */
        dev->mmio_config->interrupt_ack = 0x1;

        /* cycle through all the active rings */
        for (uint r = 0; r < MAX_VIRTIO_RINGS; r++) {
            if ((dev->active_rings_bitmap & (1<<r)) == 0)
                continue;

            struct vring *ring = &dev->ring[r];
            LTRACEF("ring %u: used flags 0x%" PRIx16 " idx 0x%" PRIx16 " last_used %u\n",
                    r, ring->used->flags, ring->used->idx, ring->last_used);

            /* walk the used ring from our last seen entry up to the
             * device's current index; indices wrap at 16 bits */
            uint cur_idx = ring->used->idx;
            for (uint i = ring->last_used; i != cur_idx; i = (i + 1) & 0xffff) {
                LTRACEF("looking at idx %u\n", i);

                // process chain
                struct vring_used_elem *used_elem = &ring->used->ring[i & ring->num_mask];
                LTRACEF("id %u, len %u\n", used_elem->id, used_elem->len);

                /* hand the completed element to the bound driver */
                DEBUG_ASSERT(dev->irq_driver_callback);
                ret |= dev->irq_driver_callback(dev, r, used_elem);

                /* the driver callback must not have advanced last_used itself */
                DEBUG_ASSERT(i == ring->last_used);
                ring->last_used = (ring->last_used + 1) & 0xffff;
            }
        }
    }
    if (irq_status & 0x2) { /* config change */
        dev->mmio_config->interrupt_ack = 0x2;

        if (dev->config_change_callback) {
            ret |= dev->config_change_callback(dev);
        }
    }

    LTRACEF("exiting irq\n");

    return ret;
}
131
/* Probe 'count' virtio-mmio register windows starting at virtual address
 * 'ptr', one every 0x200 bytes (matches qemu's virt machine layout —
 * TODO confirm for other platforms). 'irqs' holds one interrupt number
 * per slot. For each window with a valid magic, attempts to bind a
 * device-class driver (block/net/gpu, as configured at build time).
 * Returns the number of successfully initialized devices, or
 * ERR_NO_MEMORY. Must only be called once: asserts 'devices' is NULL. */
int virtio_mmio_detect(void *ptr, uint count, const uint irqs[])
{
    LTRACEF("ptr %p, count %u\n", ptr, count);

    DEBUG_ASSERT(ptr);
    DEBUG_ASSERT(irqs);
    DEBUG_ASSERT(!devices);

    /* allocate an array big enough to hold a list of devices */
    devices = calloc(count, sizeof(struct virtio_device));
    if (!devices)
        return ERR_NO_MEMORY;

    int found = 0;
    for (uint i = 0; i < count; i++) {
        volatile struct virtio_mmio_config *mmio = (struct virtio_mmio_config *)((uint8_t *)ptr + i * 0x200);
        struct virtio_device *dev = &devices[i];

        dev->index = i;
        dev->irq = irqs[i];

        /* register the handler masked; it is only unmasked below once a
         * driver has installed its irq callback */
        mask_interrupt(irqs[i]);
        register_int_handler(irqs[i], &virtio_mmio_irq, (void *)dev);

        LTRACEF("looking at magic 0x%x version 0x%x did 0x%x vid 0x%x\n",
                mmio->magic, mmio->version, mmio->device_id, mmio->vendor_id);

        /* empty or non-virtio slot */
        if (mmio->magic != VIRTIO_MMIO_MAGIC) {
            continue;
        }

#if LOCAL_TRACE
        if (mmio->device_id != 0) {
            dump_mmio_config(mmio);
        }
#endif

#if WITH_DEV_VIRTIO_BLOCK
        if (mmio->device_id == 2) { // block device
            LTRACEF("found block device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_block_init(dev, mmio->host_features);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);

                // XXX quick test code, remove
#if 0
                uint8_t buf[512];
                memset(buf, 0x99, sizeof(buf));
                virtio_block_read_write(dev, buf, 0, sizeof(buf), false);
                hexdump8_ex(buf, sizeof(buf), 0);

                buf[0]++;
                virtio_block_read_write(dev, buf, 0, sizeof(buf), true);

                virtio_block_read_write(dev, buf, 0, sizeof(buf), false);
                hexdump8_ex(buf, sizeof(buf), 0);
#endif
            }

        }
#endif // WITH_DEV_VIRTIO_BLOCK
#if WITH_DEV_VIRTIO_NET
        if (mmio->device_id == 1) { // network device
            LTRACEF("found net device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_net_init(dev, mmio->host_features);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);
            }
        }
#endif // WITH_DEV_VIRTIO_NET
#if WITH_DEV_VIRTIO_GPU
        if (mmio->device_id == 0x10) { // virtio-gpu
            LTRACEF("found gpu device\n");

            dev->mmio_config = mmio;
            dev->config_ptr = (void *)mmio->config;

            status_t err = virtio_gpu_init(dev, mmio->host_features);
            if (err >= 0) {
                // good device
                dev->valid = true;

                if (dev->irq_driver_callback)
                    unmask_interrupt(dev->irq);

                virtio_gpu_start(dev);
            }
        }
#endif // WITH_DEV_VIRTIO_GPU

        if (dev->valid)
            found++;
    }

    return found;
}
244
/* Return descriptor 'desc_index' of ring 'ring_index' to the ring's
 * free list. The free list is a singly linked stack threaded through
 * the descriptors' 'next' fields. */
void virtio_free_desc(struct virtio_device *dev, uint ring_index, uint16_t desc_index)
{
    struct vring *ring = &dev->ring[ring_index];

    LTRACEF("dev %p ring %u index %u free_count %u\n", dev, ring_index, desc_index, ring->free_count);

    /* push onto the head of the free list */
    ring->desc[desc_index].next = ring->free_list;
    ring->free_list = desc_index;
    ring->free_count++;
}
252
/* Pop a single free descriptor from ring 'ring_index'.
 * Returns its index, or 0xffff if the ring has no free descriptors. */
uint16_t virtio_alloc_desc(struct virtio_device *dev, uint ring_index)
{
    struct vring *ring = &dev->ring[ring_index];

    if (ring->free_count == 0)
        return 0xffff;

    /* a nonzero count implies the list head is valid */
    DEBUG_ASSERT(ring->free_list != 0xffff);

    /* pop the head of the free list */
    uint16_t idx = ring->free_list;
    ring->free_list = ring->desc[idx].next;
    ring->free_count--;

    return idx;
}
268
/* Allocate 'count' descriptors from ring 'ring_index' and link them into
 * a chain via VRING_DESC_F_NEXT. Returns a pointer to the head descriptor
 * (and its index through 'start_index' if non-NULL), or NULL if the ring
 * does not have 'count' free descriptors. The chain is built tail-first:
 * the last descriptor popped becomes the head. */
struct vring_desc *virtio_alloc_desc_chain(struct virtio_device *dev, uint ring_index, size_t count, uint16_t *start_index)
{
    struct vring *ring = &dev->ring[ring_index];

    if (ring->free_count < count)
        return NULL;

    struct vring_desc *head = NULL; /* most recently popped descriptor */
    uint16_t head_index = 0;

    for (size_t remaining = count; remaining > 0; remaining--) {
        /* pop the next free descriptor */
        uint16_t idx = ring->free_list;
        struct vring_desc *desc = &ring->desc[idx];

        ring->free_list = desc->next;
        ring->free_count--;

        if (head) {
            /* link this descriptor in front of the previous one */
            desc->flags = VRING_DESC_F_NEXT;
            desc->next = head_index;
        } else {
            /* tail of the chain */
            desc->flags = 0;
            desc->next = 0;
        }

        head = desc;
        head_index = idx;
    }

    if (start_index)
        *start_index = head_index;

    return head;
}
302
/* Publish the descriptor chain headed by 'desc_index' on the available
 * ring of queue 'ring_index'. Does not notify the device; callers follow
 * up with virtio_kick(). */
void virtio_submit_chain(struct virtio_device *dev, uint ring_index, uint16_t desc_index)
{
    LTRACEF("dev %p, ring %u, desc %u\n", dev, ring_index, desc_index);

    struct vring *ring = &dev->ring[ring_index];
    struct vring_avail *avail = ring->avail;

    /* place the chain head in the next available slot */
    avail->ring[avail->idx & ring->num_mask] = desc_index;

    /* the slot write must be visible to the device before the index bump */
    mb();
    avail->idx++;

#if LOCAL_TRACE
    hexdump(avail, 16);
#endif
}
318
/* Notify the device that queue 'ring_index' has new available entries. */
void virtio_kick(struct virtio_device *dev, uint ring_index)
{
    LTRACEF("dev %p, ring %u\n", dev, ring_index);

    volatile struct virtio_mmio_config *mmio = dev->mmio_config;

    /* writing the queue index to queue_notify triggers the device */
    mmio->queue_notify = ring_index;
    mb();
}
326
virtio_alloc_ring(struct virtio_device * dev,uint index,uint16_t len)327 status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len)
328 {
329 LTRACEF("dev %p, index %u, len %u\n", dev, index, len);
330
331 DEBUG_ASSERT(dev);
332 DEBUG_ASSERT(len > 0 && ispow2(len));
333 DEBUG_ASSERT(index < MAX_VIRTIO_RINGS);
334
335 if (len == 0 || !ispow2(len))
336 return ERR_INVALID_ARGS;
337
338 struct vring *ring = &dev->ring[index];
339
340 /* allocate a ring */
341 size_t size = vring_size(len, PAGE_SIZE);
342 LTRACEF("need %zu bytes\n", size);
343
344 #if WITH_KERNEL_VM
345 void *vptr;
346 status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "virtio_ring", size, &vptr,
347 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE |
348 ARCH_MMU_FLAG_PERM_NO_EXECUTE);
349 if (err < 0)
350 return ERR_NO_MEMORY;
351
352 LTRACEF("allocated virtio_ring at va %p\n", vptr);
353
354 /* compute the physical address */
355 paddr_t pa;
356 pa = vaddr_to_paddr(vptr);
357 if (pa == 0) {
358 return ERR_NO_MEMORY;
359 }
360
361 LTRACEF("virtio_ring at pa 0x%" PRIxPADDR "\n", pa);
362 #else
363 void *vptr = memalign(PAGE_SIZE, size);
364 if (!vptr)
365 return ERR_NO_MEMORY;
366
367 LTRACEF("ptr %p\n", vptr);
368 memset(vptr, 0, size);
369
370 /* compute the physical address */
371 paddr_t pa = (paddr_t)vptr;
372 #endif
373
374 /* initialize the ring */
375 vring_init(ring, len, vptr, PAGE_SIZE);
376 dev->ring[index].free_list = 0xffff;
377 dev->ring[index].free_count = 0;
378
379 /* add all the descriptors to the free list */
380 for (uint i = 0; i < len; i++) {
381 virtio_free_desc(dev, index, i);
382 }
383
384 /* register the ring with the device */
385 DEBUG_ASSERT(dev->mmio_config);
386 dev->mmio_config->guest_page_size = PAGE_SIZE;
387 dev->mmio_config->queue_sel = index;
388 dev->mmio_config->queue_num = len;
389 dev->mmio_config->queue_align = PAGE_SIZE;
390 dev->mmio_config->queue_pfn = pa / PAGE_SIZE;
391
392 /* mark the ring active */
393 dev->active_rings_bitmap |= (1 << index);
394
395 return NO_ERROR;
396 }
397
virtio_reset_device(struct virtio_device * dev)398 void virtio_reset_device(struct virtio_device *dev)
399 {
400 dev->mmio_config->status = 0;
401 }
402
virtio_status_acknowledge_driver(struct virtio_device * dev)403 void virtio_status_acknowledge_driver(struct virtio_device *dev)
404 {
405 dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
406 }
407
virtio_status_driver_ok(struct virtio_device * dev)408 void virtio_status_driver_ok(struct virtio_device *dev)
409 {
410 dev->mmio_config->status |= VIRTIO_STATUS_DRIVER_OK;
411 }
412
/* LK init hook for the virtio bus layer. Currently a no-op placeholder;
 * actual device discovery happens via virtio_mmio_detect(), called by
 * platform code. */
void virtio_init(uint level)
{
}

LK_INIT_HOOK(virtio, &virtio_init, LK_INIT_LEVEL_THREADING);
418
419