/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <string.h>     /* memset(), memcpy() */

#include "virtio-blk.h"
#include "block_int.h"

/* Per-device state for one virtio block device. */
typedef struct VirtIOBlock
{
    VirtIODevice vdev;      /* generic virtio state; kept as the FIRST member so
                               a VirtIODevice* can be cast to VirtIOBlock*
                               (see to_virtio_blk()) */
    BlockDriverState *bs;   /* backing block device */
    VirtQueue *vq;          /* the single request virtqueue */
} VirtIOBlock;

24 static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev) |
|
25 { |
|
26 return (VirtIOBlock *)vdev; |
|
27 } |
|
28 |
|
/* State for one in-flight request.  Allocated in virtio_blk_get_request()
 * and freed either in virtio_blk_rw_complete() or directly on the
 * unsupported/failure paths in virtio_blk_handle_output(). */
typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;               /* owning device */
    VirtQueueElement elem;          /* descriptors popped from the virtqueue */
    struct virtio_blk_inhdr *in;    /* status header: last in-sg entry */
    struct virtio_blk_outhdr *out;  /* request header: first out-sg entry */
    size_t size;                    /* total payload bytes (read or write) */
    uint8_t *buffer;                /* 512-aligned bounce buffer for payload */
} VirtIOBlockReq;

39 static void virtio_blk_rw_complete(void *opaque, int ret) |
|
40 { |
|
41 VirtIOBlockReq *req = opaque; |
|
42 VirtIOBlock *s = req->dev; |
|
43 |
|
44 /* Copy read data to the guest */ |
|
45 if (!ret && !(req->out->type & VIRTIO_BLK_T_OUT)) { |
|
46 size_t offset = 0; |
|
47 int i; |
|
48 |
|
49 for (i = 0; i < req->elem.in_num - 1; i++) { |
|
50 size_t len; |
|
51 |
|
52 /* Be pretty defensive wrt malicious guests */ |
|
53 len = MIN(req->elem.in_sg[i].iov_len, |
|
54 req->size - offset); |
|
55 |
|
56 memcpy(req->elem.in_sg[i].iov_base, |
|
57 req->buffer + offset, |
|
58 len); |
|
59 offset += len; |
|
60 } |
|
61 } |
|
62 |
|
63 req->in->status = ret ? VIRTIO_BLK_S_IOERR : VIRTIO_BLK_S_OK; |
|
64 virtqueue_push(s->vq, &req->elem, req->size + sizeof(*req->in)); |
|
65 virtio_notify(&s->vdev, s->vq); |
|
66 |
|
67 qemu_free(req->buffer); |
|
68 qemu_free(req); |
|
69 } |
|
70 |
|
71 static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s) |
|
72 { |
|
73 VirtIOBlockReq *req; |
|
74 |
|
75 req = qemu_mallocz(sizeof(*req)); |
|
76 if (req == NULL) |
|
77 return NULL; |
|
78 |
|
79 req->dev = s; |
|
80 if (!virtqueue_pop(s->vq, &req->elem)) { |
|
81 qemu_free(req); |
|
82 return NULL; |
|
83 } |
|
84 |
|
85 return req; |
|
86 } |
|
87 |
|
/* Virtqueue kick handler: drains every pending request from the queue.
 *
 * Expected descriptor layout per request:
 *   out_sg[0]                - struct virtio_blk_outhdr (type, sector, ...)
 *   out_sg[1..]  / in_sg[0..in_num-2] - payload (write resp. read data)
 *   in_sg[in_num-1]          - struct virtio_blk_inhdr (status byte)
 *
 * SCSI passthrough requests are completed immediately with UNSUPP.
 * Reads/writes are bounced through one contiguous 512-aligned buffer and
 * submitted as a single AIO op; completion runs virtio_blk_rw_complete().
 */
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;

    while ((req = virtio_blk_get_request(s))) {
        int i;

        /* A malformed chain is treated as a fatal guest bug. */
        if (req->elem.out_num < 1 || req->elem.in_num < 1) {
            fprintf(stderr, "virtio-blk missing headers\n");
            exit(1);
        }

        if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
            req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
            fprintf(stderr, "virtio-blk header not in correct element\n");
            exit(1);
        }

        /* Headers live directly in guest memory; no copy needed. */
        req->out = (void *)req->elem.out_sg[0].iov_base;
        req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

        if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
            /* SCSI passthrough is not implemented: fail it politely. */
            unsigned int len = sizeof(*req->in);

            req->in->status = VIRTIO_BLK_S_UNSUPP;
            virtqueue_push(vq, &req->elem, len);
            virtio_notify(vdev, vq);
            qemu_free(req);
        } else if (req->out->type & VIRTIO_BLK_T_OUT) {
            /* Write: gather payload size from out_sg[1..]. */
            size_t offset;

            for (i = 1; i < req->elem.out_num; i++)
                req->size += req->elem.out_sg[i].iov_len;

            /* NOTE(review): on allocation failure the popped element is
             * freed without being pushed back, so the request is silently
             * lost from the guest's point of view -- confirm acceptable. */
            req->buffer = qemu_memalign(512, req->size);
            if (req->buffer == NULL) {
                qemu_free(req);
                break;
            }

            /* We copy the data from the SG list to avoid splitting up the request.  This helps
               performance a lot until we can pass full sg lists as AIO operations */
            offset = 0;
            for (i = 1; i < req->elem.out_num; i++) {
                size_t len;

                /* Clamp to the declared total, defending against
                 * inconsistent sg lengths from a malicious guest. */
                len = MIN(req->elem.out_sg[i].iov_len,
                          req->size - offset);
                memcpy(req->buffer + offset,
                       req->elem.out_sg[i].iov_base,
                       len);
                offset += len;
            }

            bdrv_aio_write(s->bs, req->out->sector,
                           req->buffer,
                           req->size / 512,
                           virtio_blk_rw_complete,
                           req);
        } else {
            /* Read: result will land in in_sg[0..in_num-2]. */
            for (i = 0; i < req->elem.in_num - 1; i++)
                req->size += req->elem.in_sg[i].iov_len;

            req->buffer = qemu_memalign(512, req->size);
            if (req->buffer == NULL) {
                qemu_free(req);
                break;
            }

            bdrv_aio_read(s->bs, req->out->sector,
                          req->buffer,
                          req->size / 512,
                          virtio_blk_rw_complete,
                          req);
        }
    }
    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible.  But
     * that should be done in the generic block layer.
     */
}

/* Device reset hook: wait for all outstanding AIO to drain so no
 * completion callback fires against post-reset state.
 * NOTE: qemu_aio_flush() is global, so this waits on every device's AIO,
 * not just ours -- see the comment below. */
static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    qemu_aio_flush();
}

181 static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) |
|
182 { |
|
183 VirtIOBlock *s = to_virtio_blk(vdev); |
|
184 struct virtio_blk_config blkcfg; |
|
185 uint64_t capacity; |
|
186 int cylinders, heads, secs; |
|
187 |
|
188 bdrv_get_geometry(s->bs, &capacity); |
|
189 bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs); |
|
190 stq_raw(&blkcfg.capacity, capacity); |
|
191 stl_raw(&blkcfg.seg_max, 128 - 2); |
|
192 stw_raw(&blkcfg.cylinders, cylinders); |
|
193 blkcfg.heads = heads; |
|
194 blkcfg.sectors = secs; |
|
195 memcpy(config, &blkcfg, sizeof(blkcfg)); |
|
196 } |
|
197 |
|
198 static uint32_t virtio_blk_get_features(VirtIODevice *vdev) |
|
199 { |
|
200 return (1 << VIRTIO_BLK_F_SEG_MAX | 1 << VIRTIO_BLK_F_GEOMETRY); |
|
201 } |
|
202 |
|
/* Savevm callback: delegate to the generic virtio state saver.
 * 'opaque' is the VirtIOBlock registered in virtio_blk_init(). */
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    virtio_save(&s->vdev, f);
}

209 static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id) |
|
210 { |
|
211 VirtIOBlock *s = opaque; |
|
212 |
|
213 if (version_id != 1) |
|
214 return -EINVAL; |
|
215 |
|
216 virtio_load(&s->vdev, f); |
|
217 |
|
218 return 0; |
|
219 } |
|
220 |
|
221 void virtio_blk_init(VirtIOBindFn bind, void *bind_arg, BlockDriverState *bs) |
|
222 { |
|
223 VirtIOBlock *s; |
|
224 int cylinders, heads, secs; |
|
225 static int virtio_blk_id; |
|
226 |
|
227 s = (VirtIOBlock *)bind(bind_arg, "virtio-blk", 0, VIRTIO_ID_BLOCK, |
|
228 sizeof(struct virtio_blk_config), |
|
229 sizeof(VirtIOBlock)); |
|
230 if (!s) |
|
231 return; |
|
232 |
|
233 s->vdev.get_config = virtio_blk_update_config; |
|
234 s->vdev.get_features = virtio_blk_get_features; |
|
235 s->vdev.reset = virtio_blk_reset; |
|
236 s->bs = bs; |
|
237 bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs); |
|
238 bdrv_set_geometry_hint(s->bs, cylinders, heads, secs); |
|
239 |
|
240 s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output); |
|
241 |
|
242 register_savevm("virtio-blk", virtio_blk_id++, 1, |
|
243 virtio_blk_save, virtio_blk_load, s); |
|
244 } |