|
1 /* Cypress West Bridge API source file (cyasdma.c) |
|
2 ## =========================== |
|
3 ## |
|
4 ## Copyright Cypress Semiconductor Corporation, 2006-2009, |
|
5 ## All Rights Reserved |
|
6 ## UNPUBLISHED, LICENSED SOFTWARE. |
|
7 ## |
|
8 ## CONFIDENTIAL AND PROPRIETARY INFORMATION |
|
9 ## WHICH IS THE PROPERTY OF CYPRESS. |
|
10 ## |
|
11 ## Use of this file is governed |
|
12 ## by the license agreement included in the file |
|
13 ## |
|
14 ## <install>/license/license.txt |
|
15 ## |
|
16 ## where <install> is the Cypress software |
|
17 ## installation root directory path. |
|
18 ## |
|
19 ## =========================== |
|
20 */ |
|
21 |
|
22 #include "cyashal.h" |
|
23 #include "cyasdma.h" |
|
24 #include "cyaslowlevel.h" |
|
25 #include "cyaserr.h" |
|
26 #include "cyasregs.h" |
|
27 |
|
28 /* |
|
29 * Add the DMA queue entry to the free list to be re-used later |
|
30 */ |
|
31 static void |
|
32 CyAsDmaAddRequestToFreeQueue(CyAsDevice *dev_p, CyAsDmaQueueEntry *req_p) |
|
33 { |
|
34 uint32_t imask ; |
|
35 imask = CyAsHalDisableInterrupts() ; |
|
36 |
|
37 req_p->next_p = dev_p->dma_freelist_p ; |
|
38 dev_p->dma_freelist_p = req_p ; |
|
39 |
|
40 CyAsHalEnableInterrupts(imask) ; |
|
41 } |
|
42 |
|
43 /* |
|
44 * Get a DMA queue entry from the free list. |
|
45 */ |
|
46 static CyAsDmaQueueEntry * |
|
47 CyAsDmaGetDmaQueueEntry(CyAsDevice *dev_p) |
|
48 { |
|
49 CyAsDmaQueueEntry *req_p ; |
|
50 uint32_t imask ; |
|
51 |
|
52 CyAsHalAssert(dev_p->dma_freelist_p != 0) ; |
|
53 |
|
54 imask = CyAsHalDisableInterrupts() ; |
|
55 req_p = dev_p->dma_freelist_p ; |
|
56 dev_p->dma_freelist_p = req_p->next_p ; |
|
57 CyAsHalEnableInterrupts(imask) ; |
|
58 |
|
59 return req_p ; |
|
60 } |
|
61 |
|
62 /* |
|
63 * Set the maximum size that the West Bridge hardware can handle in a single DMA operation. This size |
|
64 * may change for the P <-> U endpoints as a function of the endpoint type and whether we are running |
|
65 * at full speed or high speed. |
|
66 */ |
|
67 CyAsReturnStatus_t |
|
68 CyAsDmaSetMaxDmaSize(CyAsDevice *dev_p, CyAsEndPointNumber_t ep, uint32_t size) |
|
69 { |
|
70 /* In MTP mode, EP2 is allowed to have all max sizes. */ |
|
71 if ((!dev_p->is_mtp_firmware) || (ep != 0x02)) |
|
72 { |
|
73 if (size < 64 || size > 1024) |
|
74 return CY_AS_ERROR_INVALID_SIZE ; |
|
75 } |
|
76 |
|
77 CY_AS_NUM_EP(dev_p, ep)->maxhwdata = (uint16_t)size ; |
|
78 return CY_AS_ERROR_SUCCESS ; |
|
79 } |
|
80 |
|
/*
 * The callback for requests sent to West Bridge to relay endpoint data.
 * Endpoint data for EP0 and EP1 are sent using mailbox requests. This is
 * the callback that is called when a response to a mailbox request to
 * send data is received.
 *
 * dev_p   - the West Bridge device the response arrived from
 * context - low-level context (unused here)
 * req_p   - the EP_DATA request that was sent (recycled below)
 * resp_p  - the response received for that request (recycled below)
 * ret     - status of the low-level exchange
 */
static void
CyAsDmaRequestCallback(
    CyAsDevice *dev_p,
    uint8_t context,
    CyAsLLRequestResponse *req_p,
    CyAsLLRequestResponse *resp_p,
    CyAsReturnStatus_t ret)
{
    uint16_t v ;
    uint16_t datacnt ;
    CyAsEndPointNumber_t ep ;

    (void)context ;

    CyAsLogDebugMessage(5, "CyAsDmaRequestCallback called") ;

    /*
     * Extract the return code from the firmware
     */
    if (ret == CY_AS_ERROR_SUCCESS)
    {
        if (CyAsLLRequestResponse_GetCode(resp_p) != CY_RESP_SUCCESS_FAILURE)
            ret = CY_AS_ERROR_INVALID_RESPONSE ;
        else
            ret = CyAsLLRequestResponse_GetWord(resp_p, 0) ;
    }

    /*
     * Extract the endpoint number and the transferred byte count
     * from the request. Word 0 is encoded by CyAsDmaSendNextDmaRequest():
     * bits 0-9 byte count, bit 13 endpoint number (0 or 1).
     */
    v = CyAsLLRequestResponse_GetWord(req_p, 0) ;
    ep = (CyAsEndPointNumber_t)((v >> 13) & 0x01) ;

    if (ret == CY_AS_ERROR_SUCCESS)
    {
        /*
         * If the firmware returns success, all of the data requested was
         * transferred. There are no partial transfers.
         */
        datacnt = v & 0x3FF ;
    }
    else
    {
        /*
         * If the firmware returned an error, no data was transferred.
         */
        datacnt = 0 ;
    }

    /*
     * Queue the request and response data structures for use with the
     * next EP0 or EP1 request. (They were detached from the device in
     * CyAsDmaSendNextDmaRequest() while the exchange was in flight.)
     */
    if (ep == 0)
    {
        dev_p->usb_ep0_dma_req = req_p ;
        dev_p->usb_ep0_dma_resp = resp_p ;
    }
    else
    {
        dev_p->usb_ep1_dma_req = req_p ;
        dev_p->usb_ep1_dma_resp = resp_p ;
    }

    /*
     * Call the DMA complete function so we can signal that this portion of the
     * transfer has completed. If the low level request was canceled, we do not
     * need to signal the completed function as the only way a cancel can happen
     * is via the DMA cancel function.
     */
    if (ret != CY_AS_ERROR_CANCELED)
        CyAsDmaCompletedCallback(dev_p->tag, ep, datacnt, ret) ;
}
|
160 |
|
161 /* |
|
162 * Set the DRQ mask register for the given endpoint number. If state is |
|
163 * CyTrue, the DRQ interrupt for the given endpoint is enabled, otherwise |
|
164 * it is disabled. |
|
165 */ |
|
166 static void |
|
167 CyAsDmaSetDrq(CyAsDevice *dev_p, CyAsEndPointNumber_t ep, CyBool state) |
|
168 { |
|
169 uint16_t mask ; |
|
170 uint16_t v ; |
|
171 uint32_t intval ; |
|
172 |
|
173 /* |
|
174 * There are not DRQ register bits for EP0 and EP1 |
|
175 */ |
|
176 if (ep == 0 || ep == 1) |
|
177 return ; |
|
178 |
|
179 /* |
|
180 * Disable interrupts while we do this to be sure the state of the |
|
181 * DRQ mask register is always well defined. |
|
182 */ |
|
183 intval = CyAsHalDisableInterrupts() ; |
|
184 |
|
185 /* |
|
186 * Set the DRQ bit to the given state for the ep given |
|
187 */ |
|
188 mask = (1 << ep) ; |
|
189 v = CyAsHalReadRegister(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK) ; |
|
190 |
|
191 if (state) |
|
192 v |= mask ; |
|
193 else |
|
194 v &= ~mask ; |
|
195 |
|
196 CyAsHalWriteRegister(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK, v) ; |
|
197 CyAsHalEnableInterrupts(intval) ; |
|
198 } |
|
199 |
|
/*
 * Send the next DMA request for the endpoint given.
 *
 * For EP0/EP1 write requests the data is relayed to West Bridge through
 * a CY_RQT_USB_EP_DATA mailbox request; for all other endpoints the
 * transfer is handed to the HAL DMA machinery and driven by DRQ
 * interrupts. If the endpoint queue is empty, DRQ interrupts for the
 * endpoint are disabled and nothing is sent.
 */
static void
CyAsDmaSendNextDmaRequest(CyAsDevice *dev_p, CyAsDmaEndPoint *ep_p)
{
    uint32_t datacnt ;
    void *buf_p ;
    CyAsDmaQueueEntry *dma_p ;

    CyAsLogDebugMessage(6, "CyAsDmaSendNextDmaRequest called") ;

    /* If the queue is empty, nothing to do */
    dma_p = ep_p->queue_p ;
    if (dma_p == 0)
    {
        /*
         * There are not pending DMA requests for this endpoint. Disable
         * the DRQ mask bits to insure no interrupts will be triggered by this
         * endpoint until someone is interested in the data.
         */
        CyAsDmaSetDrq(dev_p, ep_p->ep, CyFalse) ;
        return ;
    }

    CyAsDmaEndPointSetRunning(ep_p) ;

    /*
     * Get the number of words that still need to be xferred in
     * this request.
     */
    datacnt = dma_p->size - dma_p->offset ;
    CyAsHalAssert(datacnt >= 0) ;

    /*
     * The HAL layer should never limit the size of the transfer to
     * something less than the maxhwdata otherwise, the data will be
     * sent in packets that are not correct in size.
     */
    CyAsHalAssert(ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE || ep_p->maxhaldata >= ep_p->maxhwdata) ;

    /*
     * Update the number of words that need to be xferred yet
     * based on the limits of the HAL layer.
     */
    if (ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE)
    {
        if (datacnt > ep_p->maxhwdata)
            datacnt = ep_p->maxhwdata ;
    }
    else
    {
        if (datacnt > ep_p->maxhaldata)
            datacnt = ep_p->maxhaldata ;
    }

    /*
     * Find a pointer to the data that needs to be transferred
     */
    buf_p = (((char *)dma_p->buf_p) + dma_p->offset);

    /*
     * Mark a request in transit
     */
    CyAsDmaEndPointSetInTransit(ep_p) ;

    if (ep_p->ep == 0 || ep_p->ep == 1)
    {
        /*
         * If this is a WRITE request on EP0 and EP1, we write the data via an
         * EP_DATA request to West Bridge via the mailbox registers. If this is
         * a READ request, we do nothing and the data will arrive via an EP_DATA
         * request from West Bridge. In the request handler for the USB context
         * we will pass the data back into the DMA module.
         */
        if (dma_p->readreq == CyFalse)
        {
            uint16_t v ;
            uint16_t len ;
            CyAsLLRequestResponse *resp_p ;
            CyAsLLRequestResponse *req_p ;
            CyAsReturnStatus_t ret ;

            /* Payload length in 16-bit mailbox words, rounded up ... */
            len = (uint16_t)(datacnt / 2) ;
            if (datacnt % 2)
                len++ ;

            /* ... plus one word for the header word written below */
            len++ ;

            /*
             * Take ownership of the pre-allocated req/resp pair for this
             * endpoint; CyAsDmaRequestCallback() returns them when the
             * exchange completes.
             */
            if (ep_p->ep == 0)
            {
                req_p = dev_p->usb_ep0_dma_req ;
                resp_p = dev_p->usb_ep0_dma_resp ;
                dev_p->usb_ep0_dma_req = 0 ;
                dev_p->usb_ep0_dma_resp = 0 ;
            }
            else
            {
                req_p = dev_p->usb_ep1_dma_req ;
                resp_p = dev_p->usb_ep1_dma_resp ;
                dev_p->usb_ep1_dma_req = 0 ;
                dev_p->usb_ep1_dma_resp = 0 ;
            }

            CyAsHalAssert(req_p != 0) ;
            CyAsHalAssert(resp_p != 0) ;
            CyAsHalAssert(len <= 64) ;

            CyAsLLInitRequest(req_p, CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, len) ;

            /*
             * Header word: bits 0-9 byte count, bit 13 endpoint number,
             * bit 14 data-valid; bits 11/12 mark last/first packet.
             */
            v = (uint16_t)(datacnt | (ep_p->ep << 13) | (1 << 14)) ;
            if (dma_p->offset == 0)
                v |= (1 << 12) ;                /* Set the first packet bit */
            if (dma_p->offset + datacnt == dma_p->size)
                v |= (1 << 11) ;                /* Set the last packet bit */

            CyAsLLRequestResponse_SetWord(req_p, 0, v) ;
            CyAsLLRequestResponse_Pack(req_p, 1, datacnt, buf_p) ;

            CyAsLLInitResponse(resp_p, 1) ;

            ret = CyAsLLSendRequest(dev_p, req_p, resp_p, CyFalse, CyAsDmaRequestCallback) ;
            if (ret == CY_AS_ERROR_SUCCESS)
                CyAsLogDebugMessage(5, "+++ Send EP 0/1 data via mailbox registers") ;
            else
                CyAsLogDebugMessage(5, "+++ Error Sending EP 0/1 data via mailbox registers - CY_AS_ERROR_TIMEOUT") ;

            /* On send failure, report zero bytes transferred with the error */
            if (ret != CY_AS_ERROR_SUCCESS)
                CyAsDmaCompletedCallback(dev_p->tag, ep_p->ep, 0, ret) ;
        }
    }
    else
    {
        /*
         * This is a DMA request on an endpoint that is accessible via the P
         * port. Ask the HAL DMA capabilities to perform this. The amount of
         * data sent is limited by the HAL max size as well as what we need to
         * send. If the ep_p->maxhaldata is set to a value larger than the
         * endpoint buffer size, then we will pass more than a single buffer
         * worth of data to the HAL layer and expect the HAL layer to divide
         * the data into packets. The last parameter here (ep_p->maxhwdata)
         * gives the packet size for the data so the HAL layer knows what the
         * packet size should be.
         */
        if (CyAsDmaEndPointIsDirectionIn(ep_p))
            CyAsHalDmaSetupWrite(dev_p->tag, ep_p->ep, buf_p, datacnt, ep_p->maxhwdata) ;
        else
            CyAsHalDmaSetupRead(dev_p->tag, ep_p->ep, buf_p, datacnt, ep_p->maxhwdata) ;

        /*
         * The DRQ interrupt for this endpoint should be enabled so that the
         * data transfer progresses at interrupt time.
         */
        CyAsDmaSetDrq(dev_p, ep_p->ep, CyTrue) ;
    }
}
|
353 |
|
/*
 * This function is called when the HAL layer has completed the last requested
 * DMA operation. This function sends/receives the next batch of data
 * associated with the current DMA request, or if it is complete, moves to the
 * next DMA request.
 *
 * tag    - HAL tag identifying the device
 * ep     - endpoint the completed transfer belongs to
 * cnt    - number of bytes actually transferred in this chunk
 * status - result of the chunk transfer
 */
void
CyAsDmaCompletedCallback(CyAsHalDeviceTag tag, CyAsEndPointNumber_t ep, uint32_t cnt, CyAsReturnStatus_t status)
{
    uint32_t mask ;
    CyAsDmaQueueEntry *req_p ;
    CyAsDmaEndPoint *ep_p ;
    CyAsDevice *dev_p = CyAsDeviceFindFromTag(tag) ;

    /* Make sure the HAL layer gave us good parameters */
    CyAsHalAssert(dev_p != 0) ;
    CyAsHalAssert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE) ;
    CyAsHalAssert(ep < 16) ;


    /* Get the endpoint ptr */
    ep_p = CY_AS_NUM_EP(dev_p, ep) ;
    CyAsHalAssert(ep_p->queue_p != 0) ;

    /* Get a pointer to the current entry in the queue */
    mask = CyAsHalDisableInterrupts() ;
    req_p = ep_p->queue_p ;

    /* Update the offset to reflect the data actually received or sent */
    req_p->offset += cnt ;

    /*
     * If we are still sending/receiving the current packet, send/receive the
     * next chunk. Basically we keep going if we have not sent/received enough
     * data, and we are not doing a packet operation, and the last packet sent
     * or received was a full sized packet. In other words, when we are NOT
     * doing a packet operation, a less than full size packet (a short packet)
     * will terminate the operation.
     *
     * Note: If this is EP1 request and the request has timed out, it means the
     * buffer is not free. We have to resend the data.
     *
     * Note: For the MTP data transfers, the DMA transfer for the next packet
     * can only be started asynchronously, after a firmware event notifies that
     * the device is ready.
     */
    if (((req_p->offset != req_p->size) && (req_p->packet == CyFalse) && ((cnt == ep_p->maxhaldata) ||
        ((cnt == ep_p->maxhwdata) && ((ep != CY_AS_MTP_READ_ENDPOINT) || (cnt == dev_p->usb_max_tx_size)))))
        || ((ep == 1) && (status == CY_AS_ERROR_TIMEOUT)))
    {
        CyAsHalEnableInterrupts(mask) ;

        /*
         * And send the request again to send the next block of data. Special
         * handling for MTP transfers on EPs 2 and 6. The SendNextRequest will
         * be processed based on the event sent by the firmware.
         */
        if ((ep == CY_AS_MTP_WRITE_ENDPOINT) || (
            (ep == CY_AS_MTP_READ_ENDPOINT) && (!CyAsDmaEndPointIsDirectionIn (ep_p))))
            CyAsDmaEndPointSetStopped(ep_p) ;
        else
            CyAsDmaSendNextDmaRequest(dev_p, ep_p) ;
    }
    else
    {
        /*
         * We get here if ...
         *    we have sent or received all of the data
         *         or
         *    we are doing a packet operation
         *         or
         *    we receive a short packet
         */

        /*
         * Remove this entry from the DMA queue for this endpoint.
         */
        CyAsDmaEndPointClearInTransit(ep_p) ;
        ep_p->queue_p = req_p->next_p ;
        if (ep_p->last_p == req_p)
        {
            /*
             * We have removed the last packet from the DMA queue, disable the
             * interrupt associated with this interrupt.
             */
            ep_p->last_p = 0 ;
            CyAsHalEnableInterrupts(mask) ;
            CyAsDmaSetDrq(dev_p, ep, CyFalse) ;
        }
        else
            CyAsHalEnableInterrupts(mask) ;

        if (req_p->cb)
        {
            /*
             * If the request has a callback associated with it, call the
             * callback to tell the interested party that this DMA request has
             * completed.
             *
             * Note, we set the InCallback bit to insure that we cannot
             * recursively call an API function that is synchronous only from
             * a callback.
             */
            CyAsDeviceSetInCallback(dev_p) ;
            (*req_p->cb)(dev_p, ep, req_p->buf_p, req_p->offset, status) ;
            CyAsDeviceClearInCallback(dev_p) ;
        }

        /*
         * We are done with this request, put it on the freelist to be
         * reused at a later time.
         */
        CyAsDmaAddRequestToFreeQueue(dev_p, req_p) ;

        if (ep_p->queue_p == 0)
        {
            /*
             * If the endpoint is out of DMA entries, set it the endpoint as
             * stopped.
             */
            CyAsDmaEndPointSetStopped(ep_p) ;

            /*
             * The DMA queue is empty, wake any task waiting on the QUEUE to
             * drain.
             */
            if (CyAsDmaEndPointIsSleeping(ep_p))
            {
                CyAsDmaEndPointSetWakeState(ep_p) ;
                CyAsHalWake(&ep_p->channel) ;
            }
        }
        else
        {
            /*
             * If the queued operation is a MTP transfer, wait until firmware
             * event before sending down the next DMA request.
             */
            if ((ep == CY_AS_MTP_WRITE_ENDPOINT) || (
                (ep == CY_AS_MTP_READ_ENDPOINT) && (!CyAsDmaEndPointIsDirectionIn (ep_p))))
                CyAsDmaEndPointSetStopped(ep_p) ;
            else
                CyAsDmaSendNextDmaRequest(dev_p, ep_p) ;
        }
    }
}
|
495 |
|
496 /* |
|
497 * This function is used to kick start DMA on a given channel. If DMA is already running |
|
498 * on the given endpoint, nothing happens. If DMA is not running, the first entry is pulled |
|
499 * from the DMA queue and sent/recevied to/from the West Bridge device. |
|
500 */ |
|
501 CyAsReturnStatus_t |
|
502 CyAsDmaKickStart(CyAsDevice *dev_p, CyAsEndPointNumber_t ep) |
|
503 { |
|
504 CyAsDmaEndPoint *ep_p ; |
|
505 CyAsHalAssert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE) ; |
|
506 |
|
507 ep_p = CY_AS_NUM_EP(dev_p, ep) ; |
|
508 |
|
509 /* We are already running */ |
|
510 if (CyAsDmaEndPointIsRunning(ep_p)) |
|
511 return CY_AS_ERROR_SUCCESS ; |
|
512 |
|
513 CyAsDmaSendNextDmaRequest(dev_p, ep_p); |
|
514 return CY_AS_ERROR_SUCCESS ; |
|
515 } |
|
516 |
|
517 /* |
|
518 * This function stops the given endpoint. Stopping and endpoint cancels |
|
519 * any pending DMA operations and frees all resources associated with the |
|
520 * given endpoint. |
|
521 */ |
|
522 static CyAsReturnStatus_t |
|
523 CyAsDmaStopEndPoint(CyAsDevice *dev_p, CyAsEndPointNumber_t ep) |
|
524 { |
|
525 CyAsReturnStatus_t ret ; |
|
526 CyAsDmaEndPoint *ep_p = CY_AS_NUM_EP(dev_p, ep) ; |
|
527 |
|
528 /* |
|
529 * Cancel any pending DMA requests associated with this endpoint. This |
|
530 * cancels any DMA requests at the HAL layer as well as dequeues any request |
|
531 * that is currently pending. |
|
532 */ |
|
533 ret = CyAsDmaCancel(dev_p, ep, CY_AS_ERROR_CANCELED) ; |
|
534 if (ret != CY_AS_ERROR_SUCCESS) |
|
535 return ret ; |
|
536 |
|
537 /* |
|
538 * Destroy the sleep channel |
|
539 */ |
|
540 if (!CyAsHalDestroySleepChannel(&ep_p->channel) && ret == CY_AS_ERROR_SUCCESS) |
|
541 ret = CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED ; |
|
542 |
|
543 /* |
|
544 * Free the memory associated with this endpoint |
|
545 */ |
|
546 CyAsHalFree(ep_p) ; |
|
547 |
|
548 /* |
|
549 * Set the data structure ptr to something sane since the |
|
550 * previous pointer is now free. |
|
551 */ |
|
552 dev_p->endp[ep] = 0 ; |
|
553 |
|
554 return ret ; |
|
555 } |
|
556 |
|
/*
 * This method stops the USB stack. This is an internal function that does
 * all of the work of destroying the USB stack without the protections that
 * we provide to the API (i.e. stopping at stack that is not running).
 *
 * Returns the first endpoint-stop error encountered, or success.
 */
static CyAsReturnStatus_t
CyAsDmaStopInternal(CyAsDevice *dev_p)
{
    CyAsReturnStatus_t ret = CY_AS_ERROR_SUCCESS ;
    CyAsReturnStatus_t lret ;
    CyAsEndPointNumber_t i ;

    /*
     * Stop all of the endpoints. This cancels all DMA requests, and
     * frees all resources associated with each endpoint.
     */
    for(i = 0 ; i < sizeof(dev_p->endp)/(sizeof(dev_p->endp[0])) ; i++)
    {
        lret = CyAsDmaStopEndPoint(dev_p, i) ;
        /* Remember the first failure but keep stopping the rest */
        if (lret != CY_AS_ERROR_SUCCESS && ret == CY_AS_ERROR_SUCCESS)
            ret = lret ;
    }

    /*
     * Now, free the list of DMA requests structures that we use to manage
     * DMA requests.
     */
    while (dev_p->dma_freelist_p)
    {
        CyAsDmaQueueEntry *req_p ;
        uint32_t imask = CyAsHalDisableInterrupts() ;

        req_p = dev_p->dma_freelist_p ;
        dev_p->dma_freelist_p = req_p->next_p ;

        CyAsHalEnableInterrupts(imask) ;

        CyAsHalFree(req_p) ;
    }

    /*
     * NOTE(review): if an EP0/EP1 mailbox exchange is in flight, the
     * usb_ep0/1_dma_req/resp fields are 0 (cleared by
     * CyAsDmaSendNextDmaRequest) and the in-flight structures would not be
     * destroyed here; the *_save copies kept by CyAsDmaStart appear related --
     * confirm ownership before changing this.
     */
    CyAsLLDestroyRequest(dev_p, dev_p->usb_ep0_dma_req) ;
    CyAsLLDestroyRequest(dev_p, dev_p->usb_ep1_dma_req) ;
    CyAsLLDestroyResponse(dev_p, dev_p->usb_ep0_dma_resp) ;
    CyAsLLDestroyResponse(dev_p, dev_p->usb_ep1_dma_resp) ;

    return ret ;
}
|
604 |
|
605 |
|
606 /* |
|
607 * CyAsDmaStop() |
|
608 * |
|
609 * This function shuts down the DMA module. All resources associated with the DMA module |
|
610 * will be freed. This routine is the API stop function. It insures that we are stopping |
|
611 * a stack that is actually running and then calls the internal function to do the work. |
|
612 */ |
|
613 CyAsReturnStatus_t |
|
614 CyAsDmaStop(CyAsDevice *dev_p) |
|
615 { |
|
616 CyAsReturnStatus_t ret ; |
|
617 |
|
618 ret = CyAsDmaStopInternal(dev_p) ; |
|
619 CyAsDeviceSetDmaStopped(dev_p) ; |
|
620 |
|
621 return ret ; |
|
622 } |
|
623 |
|
/*
 * CyAsDmaStart()
 *
 * This function intializes the DMA module to insure it is up and running.
 * It pre-allocates the DMA queue entry pool and the EP0/EP1 mailbox
 * request/response structures, creates the per-endpoint data structures
 * and sleep channels, and registers the DMA completion callback with the
 * HAL. On allocation failure it unwinds via CyAsDmaStopInternal().
 */
CyAsReturnStatus_t
CyAsDmaStart(CyAsDevice *dev_p)
{
    CyAsEndPointNumber_t i ;
    uint16_t cnt ;

    if (CyAsDeviceIsDmaRunning(dev_p))
        return CY_AS_ERROR_ALREADY_RUNNING ;

    /*
     * Pre-allocate DMA Queue structures to be used in the interrupt context
     */
    for(cnt = 0 ; cnt < 32 ; cnt++)
    {
        CyAsDmaQueueEntry *entry_p = (CyAsDmaQueueEntry *)CyAsHalAlloc(sizeof(CyAsDmaQueueEntry)) ;
        if (entry_p == 0)
        {
            CyAsDmaStopInternal(dev_p) ;
            return CY_AS_ERROR_OUT_OF_MEMORY ;
        }
        CyAsDmaAddRequestToFreeQueue(dev_p, entry_p) ;
    }

    /*
     * Pre-allocate the DMA requests for sending EP0 and EP1 data to West
     * Bridge
     */
    dev_p->usb_ep0_dma_req = CyAsLLCreateRequest(dev_p, CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64) ;
    dev_p->usb_ep1_dma_req = CyAsLLCreateRequest(dev_p, CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64) ;
    if (dev_p->usb_ep0_dma_req == 0 || dev_p->usb_ep1_dma_req == 0)
    {
        CyAsDmaStopInternal(dev_p) ;
        return CY_AS_ERROR_OUT_OF_MEMORY ;
    }
    /* Keep a copy: the live field is cleared while a request is in flight */
    dev_p->usb_ep0_dma_req_save = dev_p->usb_ep0_dma_req ;

    dev_p->usb_ep0_dma_resp = CyAsLLCreateResponse(dev_p, 1) ;
    dev_p->usb_ep1_dma_resp = CyAsLLCreateResponse(dev_p, 1) ;
    if (dev_p->usb_ep0_dma_resp == 0 || dev_p->usb_ep1_dma_resp == 0)
    {
        CyAsDmaStopInternal(dev_p) ;
        return CY_AS_ERROR_OUT_OF_MEMORY ;
    }
    dev_p->usb_ep0_dma_resp_save = dev_p->usb_ep0_dma_resp ;

    /*
     * Set the dev_p->endp to all zeros to insure cleanup is possible if
     * an error occurs during initialization.
     */
    CyAsHalMemSet(dev_p->endp, 0, sizeof(dev_p->endp)) ;

    /*
     * Now, iterate through each of the endpoints and initialize each
     * one.
     */
    for(i = 0 ; i < sizeof(dev_p->endp)/sizeof(dev_p->endp[0]) ; i++)
    {
        dev_p->endp[i] = (CyAsDmaEndPoint *)CyAsHalAlloc(sizeof(CyAsDmaEndPoint)) ;
        if (dev_p->endp[i] == 0)
        {
            CyAsDmaStopInternal(dev_p) ;
            return CY_AS_ERROR_OUT_OF_MEMORY ;
        }
        CyAsHalMemSet(dev_p->endp[i], 0, sizeof(CyAsDmaEndPoint)) ;

        dev_p->endp[i]->ep = i ;
        dev_p->endp[i]->queue_p = 0 ;
        dev_p->endp[i]->last_p = 0 ;

        CyAsDmaSetDrq(dev_p, i, CyFalse) ;

        /*
         * NOTE(review): unlike the allocation failures above, this failure
         * path returns without calling CyAsDmaStopInternal(), leaking the
         * previously allocated pool entries, endpoints and sleep channels --
         * confirm whether cleanup is safe here (it would destroy a sleep
         * channel that was never created for this endpoint).
         */
        if (!CyAsHalCreateSleepChannel(&dev_p->endp[i]->channel))
            return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED ;
    }

    /*
     * Tell the HAL layer who to call when the HAL layer completes a DMA
     * request
     */
    CyAsHalDmaRegisterCallback(dev_p->tag, CyAsDmaCompletedCallback) ;

    /*
     * Mark DMA as up and running on this device
     */
    CyAsDeviceSetDmaRunning(dev_p) ;

    return CY_AS_ERROR_SUCCESS ;
}
|
715 |
|
/*
 * Wait for all entries in the DMA queue associated the given endpoint to be
 * drained. This function will not return until all the DMA data has been
 * transferred.
 *
 * dev_p     - the device to operate on
 * ep        - endpoint whose queue should drain
 * kickstart - if CyTrue, start DMA on the endpoint if it is not running
 *
 * Returns CY_AS_ERROR_SUCCESS when the queue drains,
 * CY_AS_ERROR_NESTED_SLEEP if another task is already waiting on this
 * endpoint, CY_AS_ERROR_INVALID_ENDPOINT for a bad endpoint number, or
 * CY_AS_ERROR_TIMEOUT if the queue fails to drain in time (pending
 * operations are then canceled).
 */
CyAsReturnStatus_t
CyAsDmaDrainQueue(CyAsDevice *dev_p, CyAsEndPointNumber_t ep, CyBool kickstart)
{
    CyAsDmaEndPoint *ep_p ;
    int loopcount = 1000 ;
    uint32_t mask ;

    /*
     * Make sure the endpoint is valid
     */
    if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
        return CY_AS_ERROR_INVALID_ENDPOINT ;

    /* Get the endpoint pointer based on the endpoint number */
    ep_p = CY_AS_NUM_EP(dev_p, ep) ;

    /*
     * If the endpoint is empty of traffic, we return
     * with success immediately
     */
    mask = CyAsHalDisableInterrupts() ;
    if (ep_p->queue_p == 0)
    {
        CyAsHalEnableInterrupts(mask) ;
        return CY_AS_ERROR_SUCCESS ;
    }
    else
    {
        /*
         * Add 10 seconds to the time out value for each 64 KB segment
         * of data to be transferred. (Each loop iteration below sleeps
         * up to 10 ms, so 1000 iterations = 10 seconds.)
         */
        if (ep_p->queue_p->size > 0x10000)
            loopcount += ((ep_p->queue_p->size / 0x10000) * 1000) ;
    }
    CyAsHalEnableInterrupts(mask) ;

    /* If we are already sleeping on this endpoint, it is an error */
    if (CyAsDmaEndPointIsSleeping(ep_p))
        return CY_AS_ERROR_NESTED_SLEEP ;

    /*
     * We disable the endpoint while the queue drains to
     * prevent any additional requests from being queued while we are waiting
     */
    CyAsDmaEnableEndPoint(dev_p, ep, CyFalse, CyAsDirectionDontChange) ;

    if (kickstart)
    {
        /*
         * Now, kick start the DMA if necessary
         */
        CyAsDmaKickStart(dev_p, ep) ;
    }

    /*
     * Check one last time before we begin sleeping to see if the
     * queue is drained.
     */
    if (ep_p->queue_p == 0)
    {
        CyAsDmaEnableEndPoint(dev_p, ep, CyTrue, CyAsDirectionDontChange) ;
        return CY_AS_ERROR_SUCCESS ;
    }

    while (loopcount-- > 0)
    {
        /*
         * Sleep for 10 ms maximum (per loop) while waiting for the transfer
         * to complete. CyAsDmaCompletedCallback() wakes this channel when
         * the queue empties.
         */
        CyAsDmaEndPointSetSleepState(ep_p) ;
        CyAsHalSleepOn(&ep_p->channel, 10) ;

        /* If we timed out, the sleep bit will still be set */
        CyAsDmaEndPointSetWakeState(ep_p) ;

        /* Check the queue to see if is drained */
        if (ep_p->queue_p == 0)
        {
            /*
             * Clear the endpoint running and in transit flags for the
             * endpoint, now that its DMA queue is empty.
             */
            CyAsDmaEndPointClearInTransit(ep_p) ;
            CyAsDmaEndPointSetStopped(ep_p) ;

            CyAsDmaEnableEndPoint(dev_p, ep, CyTrue, CyAsDirectionDontChange) ;
            return CY_AS_ERROR_SUCCESS ;
        }
    }

    /*
     * The DMA operation that has timed out can be cancelled, so that later
     * operations on this queue can proceed.
     */
    CyAsDmaCancel(dev_p, ep, CY_AS_ERROR_TIMEOUT) ;
    CyAsDmaEnableEndPoint(dev_p, ep, CyTrue, CyAsDirectionDontChange) ;
    return CY_AS_ERROR_TIMEOUT ;
}
|
820 |
|
821 /* |
|
822 * This function queues a write request in the DMA queue for a given endpoint. The direction of the |
|
823 * entry will be inferred from the endpoint direction. |
|
824 */ |
|
825 CyAsReturnStatus_t |
|
826 CyAsDmaQueueRequest(CyAsDevice *dev_p, CyAsEndPointNumber_t ep, void *mem_p, uint32_t size, CyBool pkt, CyBool readreq, CyAsDmaCallback cb) |
|
827 { |
|
828 uint32_t mask ; |
|
829 CyAsDmaQueueEntry *entry_p ; |
|
830 CyAsDmaEndPoint *ep_p ; |
|
831 |
|
832 /* |
|
833 * Make sure the endpoint is valid |
|
834 */ |
|
835 if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0])) |
|
836 return CY_AS_ERROR_INVALID_ENDPOINT ; |
|
837 |
|
838 /* Get the endpoint pointer based on the endpoint number */ |
|
839 ep_p = CY_AS_NUM_EP(dev_p, ep) ; |
|
840 |
|
841 if (!CyAsDmaEndPointIsEnabled(ep_p)) |
|
842 return CY_AS_ERROR_ENDPOINT_DISABLED ; |
|
843 |
|
844 entry_p = CyAsDmaGetDmaQueueEntry(dev_p) ; |
|
845 |
|
846 entry_p->buf_p = mem_p ; |
|
847 entry_p->cb = cb ; |
|
848 entry_p->size = size ; |
|
849 entry_p->offset = 0 ; |
|
850 entry_p->packet = pkt ; |
|
851 entry_p->readreq = readreq ; |
|
852 |
|
853 mask = CyAsHalDisableInterrupts() ; |
|
854 entry_p->next_p = 0 ; |
|
855 if (ep_p->last_p) |
|
856 ep_p->last_p->next_p = entry_p ; |
|
857 ep_p->last_p = entry_p ; |
|
858 if (ep_p->queue_p == 0) |
|
859 ep_p->queue_p = entry_p ; |
|
860 CyAsHalEnableInterrupts(mask) ; |
|
861 |
|
862 return CY_AS_ERROR_SUCCESS ; |
|
863 } |
|
864 |
|
865 /* |
|
866 * This function enables or disables and endpoint for DMA queueing. If an endpoint is disabled, any queued |
|
867 * requests continue to be processed, but no new requests can be queued. |
|
868 */ |
|
869 CyAsReturnStatus_t |
|
870 CyAsDmaEnableEndPoint(CyAsDevice *dev_p, CyAsEndPointNumber_t ep, CyBool enable, CyAsDmaDirection dir) |
|
871 { |
|
872 CyAsDmaEndPoint *ep_p ; |
|
873 |
|
874 /* |
|
875 * Make sure the endpoint is valid |
|
876 */ |
|
877 if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0])) |
|
878 return CY_AS_ERROR_INVALID_ENDPOINT ; |
|
879 |
|
880 /* Get the endpoint pointer based on the endpoint number */ |
|
881 ep_p = CY_AS_NUM_EP(dev_p, ep) ; |
|
882 |
|
883 if (dir == CyAsDirectionOut) |
|
884 CyAsDmaEndPointSetDirectionOut(ep_p) ; |
|
885 else if (dir == CyAsDirectionIn) |
|
886 CyAsDmaEndPointSetDirectionIn(ep_p) ; |
|
887 |
|
888 /* |
|
889 * Get the maximum size of data buffer the HAL layer can accept. This is used when |
|
890 * the DMA module is sending DMA requests to the HAL. The DMA module will never send |
|
891 * down a request that is greater than this value. |
|
892 * |
|
893 * For EP0 and EP1, we can send no more than 64 bytes of data at one time as this is the |
|
894 * maximum size of a packet that can be sent via these endpoints. |
|
895 */ |
|
896 if (ep == 0 || ep == 1) |
|
897 ep_p->maxhaldata = 64 ; |
|
898 else |
|
899 ep_p->maxhaldata = CyAsHalDmaMaxRequestSize(dev_p->tag, ep) ; |
|
900 |
|
901 if (enable) |
|
902 CyAsDmaEndPointEnable(ep_p) ; |
|
903 else |
|
904 CyAsDmaEndPointDisable(ep_p) ; |
|
905 |
|
906 return CY_AS_ERROR_SUCCESS ; |
|
907 } |
|
908 |
|
/*
 * This function cancels any DMA operations pending with the HAL layer as well
 * as any DMA operation queued on the endpoint.
 *
 * dev_p - the device to operate on
 * ep    - endpoint whose operations are canceled
 * err   - status code passed to each canceled request's callback
 *
 * The endpoint's enable state is restored on exit. Always returns
 * CY_AS_ERROR_SUCCESS (invalid endpoint numbers aside).
 */
CyAsReturnStatus_t
CyAsDmaCancel(
    CyAsDevice *dev_p,
    CyAsEndPointNumber_t ep,
    CyAsReturnStatus_t err)
{
    uint32_t mask ;
    CyAsDmaEndPoint *ep_p ;
    CyAsDmaQueueEntry *entry_p ;
    CyBool epstate ;

    /*
     * Make sure the endpoint is valid
     */
    if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
        return CY_AS_ERROR_INVALID_ENDPOINT ;

    /* Get the endpoint pointer based on the endpoint number */
    ep_p = CY_AS_NUM_EP(dev_p, ep) ;

    /* A 0 pointer means the endpoint was never created; nothing to cancel */
    if (ep_p)
    {
        /* Remember the state of the endpoint */
        epstate = CyAsDmaEndPointIsEnabled(ep_p) ;

        /*
         * Disable the endpoint so no more DMA packets can be
         * queued.
         */
        CyAsDmaEnableEndPoint(dev_p, ep, CyFalse, CyAsDirectionDontChange) ;

        /*
         * Don't allow any interrupts from this endpoint while we get the
         * most current request off of the queue.
         */
        CyAsDmaSetDrq(dev_p, ep, CyFalse) ;

        /*
         * Cancel any pending request queued in the HAL layer
         */
        if (CyAsDmaEndPointInTransit(ep_p))
            CyAsHalDmaCancelRequest(dev_p->tag, ep_p->ep) ;

        /*
         * Shutdown the DMA for this endpoint so no more data is transferred
         */
        CyAsDmaEndPointSetStopped(ep_p) ;

        /*
         * Mark the endpoint as not in transit, because we are going to consume
         * any queued requests
         */
        CyAsDmaEndPointClearInTransit(ep_p) ;

        /*
         * Now, remove each entry in the queue and call the associated callback
         * stating that the request was canceled.
         */
        ep_p->last_p = 0 ;
        while (ep_p->queue_p != 0)
        {
            /* Disable interrupts to manipulate the queue */
            mask = CyAsHalDisableInterrupts() ;

            /* Remove an entry from the queue */
            entry_p = ep_p->queue_p ;
            ep_p->queue_p = entry_p->next_p ;

            /* Ok, the queue has been updated, we can turn interrupts back on */
            CyAsHalEnableInterrupts(mask) ;

            /* Call the callback indicating we have canceled the DMA */
            if (entry_p->cb)
                entry_p->cb(dev_p, ep, entry_p->buf_p, entry_p->size, err) ;

            CyAsDmaAddRequestToFreeQueue(dev_p, entry_p) ;
        }

        if (ep == 0 || ep == 1)
        {
            /*
             * If this endpoint is zero or one, we need to clear the queue of
             * any pending CY_RQT_USB_EP_DATA requests as these are pending
             * requests to send data to the West Bridge device.
             */
            CyAsLLRemoveEpDataRequests(dev_p, ep) ;
        }

        if (epstate)
        {
            /*
             * The endpoint started out enabled, so we re-enable the endpoint
             * here.
             */
            CyAsDmaEnableEndPoint(dev_p, ep, CyTrue, CyAsDirectionDontChange) ;
        }
    }

    return CY_AS_ERROR_SUCCESS ;
}
|
1012 |
|
1013 CyAsReturnStatus_t |
|
1014 CyAsDmaReceivedData(CyAsDevice *dev_p, CyAsEndPointNumber_t ep, uint32_t dsize, void *data) |
|
1015 { |
|
1016 CyAsDmaQueueEntry *dma_p ; |
|
1017 uint8_t *src_p, *dest_p ; |
|
1018 CyAsDmaEndPoint *ep_p ; |
|
1019 uint32_t xfersize ; |
|
1020 |
|
1021 /* |
|
1022 * Make sure the endpoint is valid |
|
1023 */ |
|
1024 if (ep != 0 && ep != 1) |
|
1025 return CY_AS_ERROR_INVALID_ENDPOINT ; |
|
1026 |
|
1027 /* Get the endpoint pointer based on the endpoint number */ |
|
1028 ep_p = CY_AS_NUM_EP(dev_p, ep) ; |
|
1029 dma_p = ep_p->queue_p ; |
|
1030 if (dma_p == 0) |
|
1031 return CY_AS_ERROR_SUCCESS ; |
|
1032 |
|
1033 /* |
|
1034 * If the data received exceeds the size of the DMA buffer, clip the data to the size |
|
1035 * of the buffer. This can lead to loosing some data, but is not different than doing |
|
1036 * non-packet reads on the other endpoints. |
|
1037 */ |
|
1038 if (dsize > dma_p->size - dma_p->offset) |
|
1039 dsize = dma_p->size - dma_p->offset ; |
|
1040 |
|
1041 /* |
|
1042 * Copy the data from the request packet to the DMA buffer for the endpoint |
|
1043 */ |
|
1044 src_p = (uint8_t *)data ; |
|
1045 dest_p = ((uint8_t *)(dma_p->buf_p)) + dma_p->offset ; |
|
1046 xfersize = dsize ; |
|
1047 while (xfersize-- > 0) |
|
1048 *dest_p++ = *src_p++ ; |
|
1049 |
|
1050 /* Signal the DMA module that we have received data for this EP request */ |
|
1051 CyAsDmaCompletedCallback(dev_p->tag, ep, dsize, CY_AS_ERROR_SUCCESS) ; |
|
1052 |
|
1053 return CY_AS_ERROR_SUCCESS ; |
|
1054 } |