/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"

/* #define DEBUG_IOMMU */

/* Fill @len bytes of guest memory at @addr with the constant byte @c,
 * chunking the writes through a small on-stack buffer.  Returns nonzero
 * if any chunk could not be written.
 */
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}
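
/*
 * Usage sketch (hypothetical caller, not part of this file): zero a
 * guest-visible control block before exposing it to the guest; "ctrl_addr"
 * and "ctrl_len" are illustrative names only.
 *
 *     if (dma_memory_set(&address_space_memory, ctrl_addr, 0, ctrl_len)) {
 *         ...report a DMA error to the guest...
 *     }
 */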

/* Initialize @qsg with room for @alloc_hint entries, taking a reference
 * on @dev so it stays alive while the list is in use.
 */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

/* Append the guest range [base, base + len) to @qsg, growing the entry
 * array geometrically when it is full.
 */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
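
/*
 * Usage sketch (hypothetical device code): a device model typically builds
 * a QEMUSGList while walking its descriptor ring, then hands it to
 * dma_bdrv_read()/dma_bdrv_write() below.  "desc" and "ndesc" are
 * illustrative names only.
 *
 *     QEMUSGList qsg;
 *     int i;
 *
 *     qemu_sglist_init(&qsg, DEVICE(dev), ndesc, &address_space_memory);
 *     for (i = 0; i < ndesc; i++) {
 *         qemu_sglist_add(&qsg, desc[i].addr, desc[i].len);
 *     }
 *     ...submit I/O, then qemu_sglist_destroy(&qsg) when it completes...
 */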

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;      /* in-flight request on @bs, if any */
    QEMUSGList *sg;
    uint64_t sector_num;        /* next sector to transfer */
    DMADirection dir;
    bool in_cancel;             /* set while dma_aio_cancel() is running */
    int sg_cur_index;           /* current entry in @sg */
    dma_addr_t sg_cur_byte;     /* offset within the current entry */
    QEMUIOVector iov;           /* host mappings for the current batch */
    QEMUBH *bh;                 /* retry bottom half after a map failure */
    DMAIOFunc *io_func;         /* e.g. bdrv_aio_readv or bdrv_aio_writev */
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

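/*
 * When dma_memory_map() cannot map any part of the remaining scatter/gather
 * list (typically because the bounce buffer used for MMIO regions is
 * already in use), dma_bdrv_cb() registers continue_after_map_failure() as
 * a map client.  Once mapping resources are released, a bottom half is
 * scheduled and reschedule_dma() restarts the transfer where it stopped.
 */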
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    /* Map as much of the remaining scatter/gather list as possible,
     * building up the I/O vector for the next request.
     */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped right now; retry from a bottom half
         * once mapping resources are released.
         */
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

/* Start a scatter/gather block I/O: map the first part of @sg and submit
 * it via @io_func; dma_bdrv_cb() then continues until the whole list has
 * been transferred, finally invoking @cb.
 */
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
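
/*
 * Usage sketch (hypothetical device code, illustrative names): read from
 * sector "sector" of the backing device straight into the guest memory
 * described by "qsg", with "my_complete" invoked once the whole list has
 * been transferred (ret < 0 on error).
 *
 *     static void my_complete(void *opaque, int ret)
 *     {
 *         MyRequest *req = opaque;
 *         ...complete the request, then qemu_sglist_destroy(&req->qsg)...
 *     }
 *
 *     req->aiocb = dma_bdrv_read(bs, &req->qsg, sector, my_complete, req);
 */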

/* Copy between the linear buffer @ptr and the guest ranges in @sg,
 * stopping after @len bytes or at the end of the list, whichever comes
 * first.  Returns the number of bytes of @sg left untouched.
 */
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
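
/*
 * Usage sketch (hypothetical device code): copy a device-local response
 * buffer into the guest scatter/gather list; the return value is the
 * number of bytes of the list that were left unfilled.
 *
 *     uint8_t response[64];
 *     uint64_t resid;
 *
 *     resid = dma_buf_read(response, sizeof(response), &req->qsg);
 *     if (resid) {
 *         ...the guest asked for more data than was available...
 *     }
 */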

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}