/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

//#define VIRTIO_ZERO_COPY

/* The alignment to use between consumer and producer parts of the vring:
 * the x86 page size, for historical reasons. */
#define VIRTIO_VRING_ALIGN 4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in anyway to keep the ordering
 * requirements obvious for KVM, or in case kqemu gets SMP support.
 */
#define wmb() do { } while (0)
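
/* Guest-visible layout of the vring, as defined by the virtio
 * specification: a descriptor table (VRingDesc), an "available" ring
 * written by the guest (VRingAvail), and a "used" ring written by the
 * host (VRingUsed).  The zero-length ring[] members mark the
 * variable-sized arrays that follow each header in guest memory. */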

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

#define VIRTIO_PCI_QUEUE_MAX 16

/* virt queue functions */
#ifdef VIRTIO_ZERO_COPY
static void *virtio_map_gpa(target_phys_addr_t addr, size_t size)
{
    ram_addr_t off;
    target_phys_addr_t addr1;

    off = cpu_get_physical_page_desc(addr);
    if ((off & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        fprintf(stderr, "virtio DMA to IO ram\n");
        exit(1);
    }

    off = (off & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);

    /* Check that every page of the region is contiguous RAM. */
    for (addr1 = addr + TARGET_PAGE_SIZE;
         addr1 < TARGET_PAGE_ALIGN(addr + size);
         addr1 += TARGET_PAGE_SIZE) {
        ram_addr_t off1;

        off1 = cpu_get_physical_page_desc(addr1);
        if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            fprintf(stderr, "virtio DMA to IO ram\n");
            exit(1);
        }

        off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);

        if (off1 != (off + (addr1 - addr))) {
            fprintf(stderr, "discontiguous virtio memory\n");
            exit(1);
        }
    }

    return phys_ram_base + off;
}
#endif

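/* Layout of a queue in guest memory, given a base address pa and a ring
 * size num.  For example, with num = 256:
 *
 *   desc:  pa                     256 * 16 bytes of descriptors
 *   avail: pa + 4096              4 + 256 * 2 bytes (flags, idx, ring[])
 *   used:  align(avail + 516)     starts on the next 4096-byte boundary
 *
 * The used ring is aligned to VIRTIO_VRING_ALIGN so the producer and
 * consumer halves of the vring can live on separate pages. */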
static void virtqueue_init(VirtQueue *vq, target_phys_addr_t pa)
{
    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_VRING_ALIGN);
}

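/* Field-by-field accessors for the rings.  Rather than mapping the rings
 * into host memory, each access goes through the ld*_phys/st*_phys
 * helpers on the guest physical address of the field. */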
static inline uint64_t vring_desc_addr(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static inline uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

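/* Completion is split in two steps: virtqueue_fill() writes an element
 * into the used ring at a relative offset without publishing it, and
 * virtqueue_flush() issues the write barrier and bumps used->idx so the
 * guest sees everything filled so far.  virtqueue_push() is the common
 * single-element case. */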
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

#ifndef VIRTIO_ZERO_COPY
    for (i = 0; i < elem->out_num; i++)
        qemu_free(elem->out_sg[i].iov_base);
#endif

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

#ifdef VIRTIO_ZERO_COPY
        if (size) {
            ram_addr_t addr = (uint8_t *)elem->in_sg[i].iov_base - phys_ram_base;
            ram_addr_t off;

            for (off = 0; off < size; off += TARGET_PAGE_SIZE)
                cpu_physical_memory_set_dirty(addr + off);
        }
#else
        if (size)
            cpu_physical_memory_write(elem->in_addr[i],
                                      elem->in_sg[i].iov_base,
                                      size);

        qemu_free(elem->in_sg[i].iov_base);
#endif

        offset += size;
    }

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Write this element into the next slot of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
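
/* A typical handle_output callback drains the queue with virtqueue_pop()
 * and completes each element with virtqueue_push().  A minimal sketch
 * (the device-specific process_buffers() is hypothetical):
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             unsigned int len = process_buffers(&elem);  // hypothetical
 *             virtqueue_push(vq, &elem, len);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 */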

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved avail index from %u to %u\n",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number the guest is advertising. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available\n", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vq, i) & VRING_DESC_F_NEXT))
        return vq->vring.num;

    /* Check they're not leading us off the end of the descriptor table. */
    next = vring_desc_next(vq, i);
    /* Make sure the compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= vq->vring.num) {
        fprintf(stderr, "Desc next is %u\n", next);
        exit(1);
    }

    return next;
}

int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int num_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    num_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        int i;

        i = virtqueue_get_head(vq, idx++);
        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > vq->vring.num) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(vq, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(vq, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
    }

    return 0;
}
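
/* Pop the next available descriptor chain into *elem.  Returns the total
 * number of scatter elements, or 0 if the queue is empty.  Without
 * VIRTIO_ZERO_COPY, buffers are bounced through temporary allocations:
 * out buffers are copied in from guest memory here, and in buffers are
 * copied back out (and everything freed) by virtqueue_fill(). */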
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    do {
        struct iovec *sg;

        if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(vq, i);
            sg = &elem->in_sg[elem->in_num++];
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(vq, i);

#ifdef VIRTIO_ZERO_COPY
        sg->iov_base = virtio_map_gpa(vring_desc_addr(vq, i), sg->iov_len);
#else
        /* Cap individual scatter element size to prevent unbounded
           allocations of memory from the guest.  Practically speaking,
           no virtio driver will ever pass more than a page in each
           element.  We set the cap to 2MB in case for some reason a
           large page makes its way into the sg list.  When we implement
           a zero copy API, this limitation will disappear. */
        if (sg->iov_len > (2 << 20))
            sg->iov_len = 2 << 20;

        sg->iov_base = qemu_malloc(sg->iov_len);
        if (sg->iov_base &&
            !(vring_desc_flags(vq, i) & VRING_DESC_F_WRITE)) {
            cpu_physical_memory_read(vring_desc_addr(vq, i),
                                     sg->iov_base,
                                     sg->iov_len);
        }
#endif
        if (sg->iov_base == NULL) {
            fprintf(stderr, "Invalid mapping\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > vq->vring.num) {
            fprintf(stderr, "Looped descriptor\n");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}

/* virtio device. */

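/* Guest access to the device-specific config space.  Reads first refresh
 * the cached copy via get_config(); writes update the cache and hand it
 * to the device via set_config().  Out-of-range reads return all ones,
 * and out-of-range writes are ignored. */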
uint32_t virtio_config_readb(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    virtio_update_irq(vdev);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
    }
}

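/* Set a queue's base address.  A base of 0 resets the whole device;
 * otherwise the ring layout is recomputed from the new base. */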
void virtio_set_vring_addr(VirtIODevice *vdev, int n, target_phys_addr_t pa)
{
    VirtQueue *vq = &vdev->vq[n];

    vq->pa = pa;
    if (pa == 0) {
        virtio_reset(vdev);
    } else {
        virtqueue_init(vq, pa);
    }
}

target_phys_addr_t virtio_get_vring_pa(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->pa;
}

target_phys_addr_t virtio_get_vring_num(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];

    return vq->vring.num;
}

void virtio_kick(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    if (n < VIRTIO_PCI_QUEUE_MAX && vq->vring.desc)
        vq->handle_output(vdev, vq);
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    /* Take the first queue slot that hasn't been sized yet. */
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
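
/* A minimal sketch of device-side setup, assuming the hypothetical
 * my_handle_output callback from the example above:
 *
 *     vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 * The ring itself only comes to life once the guest programs its base
 * address through virtio_set_vring_addr(). */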

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Respect VRING_AVAIL_F_NO_INTERRUPT, but always notify when the
     * queue is empty (nothing in flight and nothing pending). */
    if ((vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx) &&
        (vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT))
        return;

    vdev->isr |= 0x01;
    virtio_update_irq(vdev);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    vdev->isr |= 0x03;
    virtio_update_irq(vdev);
}
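
/* Save/restore: binding-specific state first, then the common device
 * fields, then the geometry of each configured queue.  Ring contents
 * live in guest RAM and are not saved here; on load, virtqueue_init()
 * recomputes the ring layout from each saved base address. */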
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    vdev->save_binding(vdev, f);

    qemu_put_be32s(f, &vdev->addr);
    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    /* Count the configured queues. */
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
    }
}

void virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i;

    vdev->load_binding(vdev, f);

    qemu_get_be32s(f, &vdev->addr);
    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &vdev->features);
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i], vdev->vq[i].pa);
        }
    }

    virtio_update_irq(vdev);
}

VirtIODevice *virtio_init_common(const char *name, size_t config_size,
                                 size_t struct_size)
{
    VirtIODevice *vdev;

    vdev = (VirtIODevice *)qemu_mallocz(struct_size);
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);

    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    qemu_register_reset(virtio_reset, vdev);

    return vdev;
}
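
/* struct_size lets a device embed VirtIODevice at the start of its own
 * state structure and over-allocate here.  A minimal sketch, assuming a
 * hypothetical MyDevice type:
 *
 *     typedef struct MyDevice {
 *         VirtIODevice vdev;   // must come first
 *         int my_state;
 *     } MyDevice;
 *
 *     MyDevice *s = (MyDevice *)virtio_init_common("my-device",
 *                                                  config_size,
 *                                                  sizeof(MyDevice));
 */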