|
1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // |
|
15 |
|
16 #include "zerocopytransferstrategy.h" |
|
17 |
|
18 #include <d32usbtransfers.h> |
|
19 #include <d32usbdi.h> |
|
20 #include <d32usbdi_errors.h> |
|
21 #include "zerocopymetadata.h" |
|
22 #include "usbdiutils.h" |
|
23 |
|
24 |
|
// Records one registered transfer descriptor together with the buffer
// requirements requested for it at registration time: the data size (for
// isochronous transfers this is the per-packet size), the required start
// alignment, and (isochronous only) the maximum number of packets.
// Note: iTransferDesc is a reference member, so the details entry aliases
// the caller's descriptor object rather than copying it.
RUsbZeroCopyTransferStrategy::TUsbTransferDescriptorDetails::TUsbTransferDescriptorDetails(RUsbTransferDescriptor& aTransferDesc, TInt aRequiredSize, TUint aRequiredAlignment, TInt aRequiredMaxPackets)
	: iTransferDesc(aTransferDesc)
	, iRequiredSize(aRequiredSize)
	, iRequiredAlignment(aRequiredAlignment)
	, iRequiredMaxPackets(aRequiredMaxPackets)
	{
	}
|
32 |
|
// Default constructor. The strategy starts unbound: iInterfaceHandle is NULL
// until InitialiseTransferDescriptors() associates it with an RUsbInterface.
RUsbZeroCopyTransferStrategy::RUsbZeroCopyTransferStrategy()
	: iInterfaceHandle(NULL)
	{
	}
|
37 |
|
38 |
|
// Releases everything this strategy holds and returns it to the unopened
// state. Also used internally as the roll-back path when
// InitialiseTransferDescriptors() fails part-way through.
void RUsbZeroCopyTransferStrategy::Close()
	{
	iInterfaceHandle = NULL; // borrowed pointer only - nothing to free
	iChunk.Close();          // release the shared data chunk (if allocated)
	iRegisteredTransfers.Close();
	RUsbTransferStrategy::Close();
	}
|
46 |
|
47 |
|
48 TInt RUsbZeroCopyTransferStrategy::RegisterTransferDescriptor(RUsbTransferDescriptor& aTransferDesc, TInt aRequiredSize, TUint aStartAlignment, TInt aRequiredMaxPackets) |
|
49 { |
|
50 __ASSERT_ALWAYS(!iInterfaceHandle, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorsAlreadyRegistered)); |
|
51 if (iRegisteredTransfers.Find(aTransferDesc, CompareTransferDescriptor) != KErrNotFound) |
|
52 { |
|
53 return KErrAlreadyExists; |
|
54 } |
|
55 return iRegisteredTransfers.Append(TUsbTransferDescriptorDetails(aTransferDesc, aRequiredSize, aStartAlignment, aRequiredMaxPackets)); |
|
56 } |
|
57 |
|
58 TBool RUsbZeroCopyTransferStrategy::CompareTransferDescriptor(const RUsbTransferDescriptor* aTransferDesc, const TUsbTransferDescriptorDetails& aDetails) |
|
59 { |
|
60 return aTransferDesc == &aDetails.iTransferDesc; |
|
61 } |
|
62 |
|
63 |
|
// Discards all registrations made via RegisterTransferDescriptor().
// Only legal before InitialiseTransferDescriptors() has bound the strategy
// to an interface.
void RUsbZeroCopyTransferStrategy::ResetTransferDescriptors()
	{
	__ASSERT_ALWAYS(!iInterfaceHandle, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorsAlreadyRegistered));
	iRegisteredTransfers.Reset();
	}
|
69 |
|
70 |
|
// Binds the registered descriptors to aInterface. Computes the layout of all
// transfer data (CalculateDataLayout) and meta-data (CalculateMetaDataLayout),
// allocates a single shared chunk big enough for both, then initialises the
// meta-data records inside it (InitialiseMetaData).
// On any failure the strategy is rolled back via Close() and the error
// returned; on success the strategy is open and the descriptors are usable.
TInt RUsbZeroCopyTransferStrategy::InitialiseTransferDescriptors(RUsbInterface& aInterface)
	{
	__ASSERT_ALWAYS(!iInterfaceHandle, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorsAlreadyRegistered));

	// This is the equivilent of a standard R-class Open() method, so initialise the references
	// we are going to use.
	iInterfaceHandle = &aInterface;

	// First get the page-size as we will need this for isoc transfer calculations.
	TInt hcdPageSize = 0;
	TInt err = aInterface.GetHcdPageSize(hcdPageSize);
	if (err != KErrNone)
		{
		Close(); // roll back
		return err;
		}
	iPageSize = hcdPageSize;

	// Lay out the data regions; the counters feed the meta-data sizing below.
	TInt currentOffset = 0;
	TInt numStandardTransfers = 0;
	TInt numIsocTransfers = 0;
	TInt numIsocElements = 0;
	err = CalculateDataLayout(currentOffset, numStandardTransfers, numIsocTransfers, numIsocElements);
	if (err != KErrNone)
		{
		Close(); // roll back
		return err;
		}

	TInt metaDataStart = 0;
	CalculateMetaDataLayout(currentOffset, metaDataStart, numStandardTransfers, numIsocTransfers, numIsocElements);

	// currentOffset should now be just past the region required for all the data and meta data.
	// Therefore it equals the total size of the buffer we need to hold them all.
	err = iInterfaceHandle->AllocateSharedChunk(iChunk, currentOffset, iBaseOffset);
	if (err != KErrNone)
		{
		Close(); // roll back
		return err;
		}

	InitialiseMetaData(metaDataStart, numStandardTransfers, numIsocTransfers, numIsocElements);

	return KErrNone;
	}
|
116 |
|
// Walks the registered descriptors and assigns each one an offset within the
// (not yet allocated) shared chunk, honouring the requested alignment and,
// for isochronous transfers, the USB stack's rule that a data element must
// not cross an HCD page boundary.
// On return aCurrentOffset is the total data space required, and the three
// out-counters report how many meta-data records of each kind
// CalculateMetaDataLayout() must budget for.
// Panics if an alignment request exceeds the page size or is not a power of
// two, or if an isoc descriptor requests zero packets.
TInt RUsbZeroCopyTransferStrategy::CalculateDataLayout(TInt& aCurrentOffset, TInt& aNumStandardTransfers, TInt& aNumIsocTransfers, TInt& aNumIsocElements)
	{
	// Masks to split an offset into page number / intra-page address
	// (iPageSize is a power of two).
	const TUint32 pageAddrBits = iPageSize-1;
	const TUint32 pageTableMask = ~pageAddrBits;

	//Get the maximum wMaxPacketSize of the associated interface for Bulk/Interrupt EPs
	TInt maxMaxBulk = 0;
	TInt maxMaxInterrupt = 0;
	TInt err = GetMaximumMaxPacketSize(maxMaxBulk, maxMaxInterrupt);
	if (err != KErrNone)
		{
		return err;
		}

	// Work out where to place the transfers, and how much space is needed.
	TInt numTransfers = iRegisteredTransfers.Count();
	for (TInt i=0; i < numTransfers; ++i)
		{
		TUsbTransferDescriptorDetails& details = iRegisteredTransfers[i];

		// May raise details.iRequiredAlignment to what the endpoint demands.
		err = CaculateAdditionalAlignment(aCurrentOffset, maxMaxBulk, maxMaxInterrupt, details);
		if (err != KErrNone)
			{
			return err;
			}

		// only allow intra-page alignment requests that are powers of 2 (so offset agnostic).
		__ASSERT_ALWAYS(details.iRequiredAlignment <= iPageSize, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorAlignmentOverPageBoundary));
		__ASSERT_ALWAYS(IsPowerOfTwo(details.iRequiredAlignment), UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorAlignmentNotPowerOfTwo));

		TInt alignPad = IncNeededToAlign(aCurrentOffset, details.iRequiredAlignment);
		__ASSERT_DEBUG(alignPad < iPageSize, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadAlignment)); // just re-asserting what should be guarded above
		aCurrentOffset += alignPad; // Align to the start of transfer buffer

		// There are stark differences between isoc transfers and transfer of other types.
		if (details.iTransferDesc.iType == RUsbTransferDescriptor::EIsochronous)
			{
			// First do some Isoc specific checks
			__ASSERT_ALWAYS(details.iRequiredMaxPackets > 0, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorNoPacketsRequested));

			// For the allocation we have to consider the worse case - that is that the max
			// number of packets at the max packet size.
			// We are constrained by the USB stack to not allow transfers across page boundaries.

			// As such we calculate how many packets we can fit into a page to determine the
			// number of pages for data we need.
			const TInt packetsPerPage = iPageSize/details.iRequiredSize;

			// Assign the start to an appropriate point.
			details.iAssignedOffset = aCurrentOffset;
			TInt packetsToStore = details.iRequiredMaxPackets;
			TInt numElements = 0; // for counting up the number of pages we need meta-data for.

			// The size requried to hold a length array for the descriptor
			const TInt lengthsArrayLength = UsbZeroCopyIsocChunkHeader::KLengthsElementSize * details.iRequiredMaxPackets;
			// The size required to hold a result array for the descriptor
			const TInt resultsArrayLength = UsbZeroCopyIsocChunkHeader::KResultsElementSize * details.iRequiredMaxPackets;

			// Determine how much we can fit into the remaining space of the current page.
			// (If the alignment padding pushed us onto a fresh page there is no partial page to use.)
			TBool samePage = (pageTableMask & aCurrentOffset) == (pageTableMask & (aCurrentOffset - alignPad));
			if (samePage)
				{
				TInt remainingSpace = iPageSize - (pageAddrBits & aCurrentOffset);
				TInt packetsThatFit = remainingSpace / details.iRequiredSize;
				if (packetsThatFit >= packetsToStore)
					{
					// We can fit it in this page so we finish here - this is the special case.
					aCurrentOffset += packetsToStore * details.iRequiredSize;
					++aNumIsocElements;
					++aNumIsocTransfers;
					details.iNumElements = 1;
					// Do the lengths array
					aCurrentOffset += IncNeededToAlign(aCurrentOffset, UsbZeroCopyIsocChunkHeader::KLengthsElementSize);
					details.iLengthsOffset = aCurrentOffset;
					aCurrentOffset += lengthsArrayLength;
					// The dual lengths array should be implicitly alligned
					details.iReqLenOffset = aCurrentOffset;
					aCurrentOffset += lengthsArrayLength;
					// Now handle the results array
					aCurrentOffset += IncNeededToAlign(aCurrentOffset, UsbZeroCopyIsocChunkHeader::KResultsElementSize);
					details.iResultsOffset = aCurrentOffset;
					aCurrentOffset += resultsArrayLength;
					continue;
					}
				aCurrentOffset = (pageTableMask & aCurrentOffset) + iPageSize; // Advance to next page
				packetsToStore -= packetsThatFit;
				++numElements;
				}
			__ASSERT_DEBUG(packetsToStore > 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorNoPacketsLeftToStore));

			// Determine the number of pages extra that are needed (minus one)
			TInt pagesRequired = packetsToStore / packetsPerPage;

			// Determine how much of the last page is actually needed.
			TInt trailingPackets = packetsToStore % packetsPerPage;
			TInt usedSpace = trailingPackets * details.iRequiredSize;

			// Commit the amount for the buffers.
			aCurrentOffset += usedSpace + pagesRequired*iPageSize;
			numElements += pagesRequired + /*the final page*/1; // We have already included the first page (if already partially used)
			aNumIsocElements += numElements;
			++aNumIsocTransfers;

			// Used to ensure only allocate an appropriate number per-descriptor.
			details.iNumElements = numElements;

			// We also need an array of lengths for each packet that we use (need to align to even bytes).
			aCurrentOffset += IncNeededToAlign(aCurrentOffset, UsbZeroCopyIsocChunkHeader::KLengthsElementSize);
			details.iLengthsOffset = aCurrentOffset;
			aCurrentOffset += lengthsArrayLength;
			// Dual length array should be implicitly aligned
			details.iReqLenOffset = aCurrentOffset;
			aCurrentOffset += lengthsArrayLength;
			// Now handle the results array
			aCurrentOffset += IncNeededToAlign(aCurrentOffset, UsbZeroCopyIsocChunkHeader::KResultsElementSize);
			details.iResultsOffset = aCurrentOffset;
			aCurrentOffset += resultsArrayLength;
			}
		else
			{
			// Bulk/interrupt/control: a single contiguous region of the requested size.
			details.iAssignedOffset = aCurrentOffset;
			aCurrentOffset += details.iRequiredSize;
			++aNumStandardTransfers;
			}
		}

	return KErrNone;
	}
|
245 |
|
246 |
|
247 void RUsbZeroCopyTransferStrategy::CalculateMetaDataLayout(TInt& aCurrentOffset, TInt& aMetaDataStart, TInt aNumStandardTransfers, TInt aNumIsocTransfers, TInt aNumIsocElements) |
|
248 { |
|
249 // Round up to 4 byte alignment for handling the meta-data correctly. |
|
250 aCurrentOffset += IncNeededToAlign(aCurrentOffset, sizeof(TInt)); |
|
251 |
|
252 aMetaDataStart = aCurrentOffset; |
|
253 |
|
254 // Now calculate the size required for the transfer meta-data. |
|
255 aCurrentOffset += aNumStandardTransfers * UsbZeroCopyBulkIntrChunkHeader::HeaderSize(); |
|
256 aCurrentOffset += aNumIsocTransfers * UsbZeroCopyIsocChunkHeader::HeaderSize(); |
|
257 aCurrentOffset += aNumIsocElements * UsbZeroCopyIsocChunkElement::ElementSize(); |
|
258 } |
|
259 |
|
260 void RUsbZeroCopyTransferStrategy::InitialiseMetaData(TInt aMetaDataOffset, TInt aNumStandardTransfers, TInt aNumIsocTransfers, TInt aNumIsocElements) |
|
261 { |
|
262 const TUint32 pageAddrBits = iPageSize-1; |
|
263 const TUint32 pageTableMask = ~pageAddrBits; |
|
264 |
|
265 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
266 |
|
267 TInt numTransfers = iRegisteredTransfers.Count(); |
|
268 for (TInt i=0; i < numTransfers; ++i) |
|
269 { |
|
270 TUsbTransferDescriptorDetails details = iRegisteredTransfers[i]; |
|
271 |
|
272 if (details.iTransferDesc.iType == RUsbTransferDescriptor::EIsochronous) |
|
273 { |
|
274 // Initialise Meta-data (minus elements). |
|
275 UsbZeroCopyIsocChunkHeader::TransferType(chunkBase, aMetaDataOffset) = details.iTransferDesc.iType; |
|
276 UsbZeroCopyIsocChunkHeader::MaxNumPackets(chunkBase, aMetaDataOffset) = details.iRequiredMaxPackets; |
|
277 UsbZeroCopyIsocChunkHeader::MaxPacketSize(chunkBase, aMetaDataOffset) = details.iRequiredSize; |
|
278 // Double check that the length array is aligned correctly. |
|
279 __ASSERT_DEBUG(details.iLengthsOffset % UsbZeroCopyIsocChunkHeader::KLengthsElementSize == 0, |
|
280 UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorLengthsArrayBadAlignment)); |
|
281 UsbZeroCopyIsocChunkHeader::LengthsOffset(chunkBase, aMetaDataOffset) = details.iLengthsOffset; |
|
282 UsbZeroCopyIsocChunkHeader::ReqLenOffset(chunkBase, aMetaDataOffset) = details.iReqLenOffset; |
|
283 // Double check that the result array is aligned correctly. |
|
284 __ASSERT_DEBUG(details.iResultsOffset % UsbZeroCopyIsocChunkHeader::KResultsElementSize == 0, |
|
285 UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorResultsArrayBadAlignment)); |
|
286 UsbZeroCopyIsocChunkHeader::ResultsOffset(chunkBase, aMetaDataOffset) = details.iResultsOffset; |
|
287 // Initialise transfer descriptor |
|
288 SetTransferHandle(details.iTransferDesc, aMetaDataOffset); |
|
289 // Move on to next meta-data slot |
|
290 TInt prevMetaOffset = aMetaDataOffset; |
|
291 aMetaDataOffset += UsbZeroCopyIsocChunkHeader::HeaderSize(); |
|
292 |
|
293 // Initialise elements for transfers |
|
294 UsbZeroCopyIsocChunkHeader::FirstElementOffset(chunkBase, prevMetaOffset) = aMetaDataOffset; |
|
295 |
|
296 TInt isocElementsUnmapped = details.iNumElements; |
|
297 // First element could be anywhere, the others are at the start of (virtually) contiguous pages |
|
298 TInt offset = details.iAssignedOffset; |
|
299 while (isocElementsUnmapped > 0) |
|
300 { |
|
301 // Update the data references |
|
302 UsbZeroCopyIsocChunkElement::DataOffset(chunkBase, aMetaDataOffset) = offset; |
|
303 UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, aMetaDataOffset) = 0; // Default value. |
|
304 // Move on to the next element and bind it to the chain. |
|
305 prevMetaOffset = aMetaDataOffset; |
|
306 aMetaDataOffset += UsbZeroCopyIsocChunkElement::ElementSize(); |
|
307 UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, prevMetaOffset) = aMetaDataOffset; |
|
308 // Move to the next page |
|
309 offset = (pageTableMask&offset)+iPageSize; |
|
310 --isocElementsUnmapped; |
|
311 --aNumIsocElements; |
|
312 } |
|
313 // We have reached the end of the list so we should update the next element offset for the |
|
314 // last element to indicate that it is the terminator. |
|
315 UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, prevMetaOffset) = UsbZeroCopyIsocChunkElement::KEndOfList; |
|
316 --aNumIsocTransfers; |
|
317 } |
|
318 else |
|
319 { |
|
320 // Initialise Meta-data. |
|
321 UsbZeroCopyBulkIntrChunkHeader::TransferType(chunkBase, aMetaDataOffset) = details.iTransferDesc.iType; |
|
322 UsbZeroCopyBulkIntrChunkHeader::DataOffset(chunkBase, aMetaDataOffset) = details.iAssignedOffset; |
|
323 UsbZeroCopyBulkIntrChunkHeader::DataLength(chunkBase, aMetaDataOffset) = 0; |
|
324 UsbZeroCopyBulkIntrChunkHeader::DataMaxLength(chunkBase, aMetaDataOffset) = details.iRequiredSize; |
|
325 UsbZeroCopyBulkIntrChunkHeader::ZlpStatus(chunkBase, aMetaDataOffset) = RUsbTransferDescriptor::ESendZlpIfRequired; |
|
326 // Initialise transfer descriptor |
|
327 SetTransferHandle(details.iTransferDesc, aMetaDataOffset); |
|
328 // Move on to next meta-data slot |
|
329 aMetaDataOffset += UsbZeroCopyBulkIntrChunkHeader::HeaderSize(); |
|
330 --aNumStandardTransfers; |
|
331 } |
|
332 } |
|
333 |
|
334 __ASSERT_DEBUG(aNumStandardTransfers == 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorIncompleteInitialisation)); |
|
335 __ASSERT_DEBUG(aNumIsocTransfers == 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorIncompleteInitialisation)); |
|
336 __ASSERT_DEBUG(aNumIsocElements == 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorIncompleteInitialisation)); |
|
337 } |
|
338 |
|
339 |
|
340 TBool RUsbZeroCopyTransferStrategy::IsPowerOfTwo(TUint aNumber) |
|
341 { |
|
342 return aNumber && !(aNumber & (aNumber - 1)); //this returns true if the integer is a power of two |
|
343 } |
|
344 |
|
345 |
|
346 TInt RUsbZeroCopyTransferStrategy::IncNeededToAlign(TInt aOffset, TUint aAlignment) |
|
347 { |
|
348 if (aAlignment == 0) |
|
349 { |
|
350 return 0; |
|
351 } |
|
352 TInt remain = aOffset % aAlignment; |
|
353 return (aAlignment - remain) % aAlignment; |
|
354 } |
|
355 |
|
356 |
|
357 // Standard Methods |
|
358 |
|
359 TPtr8 RUsbZeroCopyTransferStrategy::WritableBuffer(TInt aHandle) |
|
360 { |
|
361 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
362 |
|
363 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
364 |
|
365 TUint8* dataPtr = chunkBase + UsbZeroCopyBulkIntrChunkHeader::DataOffset(chunkBase, aHandle); |
|
366 TInt maxLength = UsbZeroCopyBulkIntrChunkHeader::DataMaxLength(chunkBase, aHandle); |
|
367 |
|
368 return TPtr8(dataPtr, 0, maxLength); |
|
369 } |
|
370 |
|
371 void RUsbZeroCopyTransferStrategy::SaveData(TInt aHandle, TInt aLength) |
|
372 { |
|
373 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
374 |
|
375 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
376 |
|
377 TInt maxLength = UsbZeroCopyBulkIntrChunkHeader::DataMaxLength(chunkBase, aHandle); |
|
378 __ASSERT_ALWAYS(aLength <= maxLength, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorSavedToMuchData)); |
|
379 |
|
380 UsbZeroCopyBulkIntrChunkHeader::DataLength(chunkBase, aHandle) = aLength; |
|
381 } |
|
382 |
|
383 void RUsbZeroCopyTransferStrategy::SetZlpStatus(TInt aHandle, RUsbTransferDescriptor::TZlpStatus aZlpStatus) |
|
384 { |
|
385 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
386 |
|
387 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
388 |
|
389 UsbZeroCopyBulkIntrChunkHeader::ZlpStatus(chunkBase, aHandle) = aZlpStatus; |
|
390 } |
|
391 |
|
392 TPtrC8 RUsbZeroCopyTransferStrategy::Buffer(TInt aHandle) const |
|
393 { |
|
394 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
395 |
|
396 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
397 |
|
398 TUint8* dataPtr = chunkBase + UsbZeroCopyBulkIntrChunkHeader::DataOffset(chunkBase, aHandle); |
|
399 TInt length = UsbZeroCopyBulkIntrChunkHeader::DataLength(chunkBase, aHandle); |
|
400 |
|
401 return TPtrC8(dataPtr, length); |
|
402 } |
|
403 |
|
404 |
|
405 |
|
406 |
|
407 // Isochronous Methods |
|
408 |
|
409 void RUsbZeroCopyTransferStrategy::Reset(TInt aHandle) |
|
410 { |
|
411 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
412 |
|
413 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
414 |
|
415 // Loop through and reset number of packets in each element as 0 |
|
416 TInt elementOffset = UsbZeroCopyIsocChunkHeader::FirstElementOffset(chunkBase, aHandle); |
|
417 while (elementOffset != UsbZeroCopyIsocChunkElement::KEndOfList) |
|
418 { |
|
419 UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, elementOffset) = 0; |
|
420 elementOffset = UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, elementOffset); |
|
421 } |
|
422 } |
|
423 |
|
424 TPacketLengths RUsbZeroCopyTransferStrategy::Lengths(TInt aHandle) |
|
425 { |
|
426 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
427 |
|
428 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
429 |
|
430 TInt lengthsOffset = UsbZeroCopyIsocChunkHeader::LengthsOffset(chunkBase, aHandle); |
|
431 TUint16* lengthsPtr = reinterpret_cast<TUint16*>(chunkBase + lengthsOffset); |
|
432 |
|
433 TInt reqLenOffset = UsbZeroCopyIsocChunkHeader::ReqLenOffset(chunkBase, aHandle); |
|
434 TUint16* reqLenPtr = reinterpret_cast<TUint16*>(chunkBase + reqLenOffset); |
|
435 |
|
436 TInt& maxNumPackets = UsbZeroCopyIsocChunkHeader::MaxNumPackets(chunkBase, aHandle); |
|
437 |
|
438 return TPacketLengths(lengthsPtr, reqLenPtr, maxNumPackets); |
|
439 } |
|
440 |
|
441 TPacketResults RUsbZeroCopyTransferStrategy::Results(TInt aHandle) |
|
442 { |
|
443 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
444 |
|
445 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
446 |
|
447 TInt resultsOffset = UsbZeroCopyIsocChunkHeader::ResultsOffset(chunkBase, aHandle); |
|
448 TInt* resultsPtr = reinterpret_cast<TInt*>(chunkBase + resultsOffset); |
|
449 |
|
450 TInt& maxNumPackets = UsbZeroCopyIsocChunkHeader::MaxNumPackets(chunkBase, aHandle); |
|
451 |
|
452 return TPacketResults(resultsPtr, maxNumPackets); |
|
453 } |
|
454 |
|
455 TInt RUsbZeroCopyTransferStrategy::MaxPacketSize(TInt aHandle) |
|
456 { |
|
457 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
458 |
|
459 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
460 |
|
461 TInt maxPacketSize = UsbZeroCopyIsocChunkHeader::MaxPacketSize(chunkBase, aHandle); |
|
462 |
|
463 return maxPacketSize; |
|
464 } |
|
465 |
|
// Returns a writable view into an isochronous transfer for up to
// aNumPacketsRequested packets, constrained so the returned buffer never
// crosses an HCD page boundary. aWriteHandle identifies the element to write
// into (when equal to aHandle it is resolved to the first element).
// aMaxNumPacketsAbleToWrite is set to the number of packets the caller may
// actually write. If fewer packets than remain were requested, the NEXT
// element is marked invalid so that subsequent writable-buffer requests
// return an empty view until the descriptor is Reset().
TPtr8 RUsbZeroCopyTransferStrategy::WritablePackets(TInt aHandle, TInt aWriteHandle, TInt aNumPacketsRequested, TInt& aMaxNumPacketsAbleToWrite)
	{
	__ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle));
	__ASSERT_DEBUG(aWriteHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadWriteHandle));

	TUint8* chunkBase = iChunk.Base() + iBaseOffset;

	// Masks for splitting an address into page number / intra-page address.
	const TUint32 pageAddrBits = iPageSize-1;
	const TUint32 pageTableMask = ~pageAddrBits;

	if (aHandle == aWriteHandle)
		{
		// The initial write handle will be the same as the standard handle so we need to find the actual
		// element to work correctly.
		aWriteHandle = UsbZeroCopyIsocChunkHeader::FirstElementOffset(chunkBase, aHandle);
		}

	// Now we have two cases - the number of packets requested is contained in one page, or it crosses the page.
	// 1) If we cross the page then we get the buffer for upto the end of the page, and inform the user of the number
	//    of packets they are able to write into it (normally this will be quite high as we can consider 0 length
	//    packets.)
	// 2) If we are on one page then we provide a buffer to the end of the page and return the number of packets
	//    the requested as the max they can write. However we also now mark it so that an attempt to get a subsequent
	//    writable buffer will return a 0 max length TPtr8 and 0 max number of packets to write. If they want write
	//    more they need to reset the descriptor and start again.

	if (UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, aWriteHandle) == UsbZeroCopyIsocChunkElement::KInvalidElement)
		{
		// Here we are testing the second case, if we previously marked an element as invalid then we must not
		// return a valid buffer.
		aMaxNumPacketsAbleToWrite = 0;
		return TPtr8(NULL, 0);
		}

	TInt dataOffset = UsbZeroCopyIsocChunkElement::DataOffset(chunkBase, aWriteHandle);

	TUint8* dataPtr = chunkBase + dataOffset;
	// Worst case: every requested packet at the maximum packet size.
	TInt totalMaxSize = aNumPacketsRequested * UsbZeroCopyIsocChunkHeader::MaxPacketSize(chunkBase, aHandle);
	// The USB stack requires isoc transfer to be limited to a page (not allowed to cross the boundary).
	TUint32 dataAddr = reinterpret_cast<TUint32>(dataPtr);
	TBool samePage = (pageTableMask & dataAddr) == (pageTableMask & (dataAddr + totalMaxSize));
	TInt allowableSize = samePage ? totalMaxSize : iPageSize - (pageAddrBits & dataAddr);

	TInt numPacketsRemaining = UsbZeroCopyIsocChunkHeader::MaxNumPackets(chunkBase, aHandle) - UsedPackets(aHandle);

	if (aNumPacketsRequested < numPacketsRemaining)
		{
		// This is the 2nd case as documented in the comment. So we mark the next packet as invalid.
		aMaxNumPacketsAbleToWrite = aNumPacketsRequested;
		TInt nextElement = UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, aWriteHandle);
		if (nextElement != UsbZeroCopyIsocChunkElement::KEndOfList)
			{
			UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, nextElement) = UsbZeroCopyIsocChunkElement::KInvalidElement; // Mark as invalid.
			}
		// else we are at the end of the list anyway
		}
	else
		{
		aMaxNumPacketsAbleToWrite = numPacketsRemaining;
		}

	return TPtr8(dataPtr, allowableSize);
	}
|
529 |
|
// Commits aNumPackets packets (whose lengths the caller has already written
// into the lengths array) to the element identified by aWriteHandle.
// Panics if the element was marked invalid, if the total would exceed the
// registered maximum packet count, or if any individual length exceeds the
// maximum packet size.
// Returns the write handle for the next element, or KErrEof when the element
// chain is exhausted.
TInt RUsbZeroCopyTransferStrategy::SaveMultiple(TInt aHandle, TInt aWriteHandle, TInt aNumPackets)
	{
	__ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle));
	__ASSERT_DEBUG(aWriteHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadWriteHandle));
	__ASSERT_ALWAYS(aNumPackets > 0, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorNoPacketsToSave));

	TUint8* chunkBase = iChunk.Base() + iBaseOffset;

	// The initial write handle equals the transfer handle; resolve it to the first element.
	if (aHandle == aWriteHandle)
		{
		aWriteHandle = UsbZeroCopyIsocChunkHeader::FirstElementOffset(chunkBase, aHandle);
		}

	// if marked invalid then they shouldn't try to save it (they haven't been able to write anything into the data anyway).
	__ASSERT_ALWAYS(UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, aWriteHandle) != UsbZeroCopyIsocChunkElement::KInvalidElement,
		UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorInvalidSaveCall));

	// Ensure they've not tried to write in too many packets
	TInt usedPackets = UsedPackets(aHandle);
	__ASSERT_ALWAYS(aNumPackets + usedPackets <= UsbZeroCopyIsocChunkHeader::MaxNumPackets(chunkBase, aHandle),
		UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorSavedTooManyPackets));

	// Check that the length values have not exceeded the maximum.
	TInt maxPacketSize = UsbZeroCopyIsocChunkHeader::MaxPacketSize(chunkBase, aHandle);
	TInt lengthsOffset = UsbZeroCopyIsocChunkHeader::LengthsOffset(chunkBase, aHandle);
	TUint16* lengthsPtr = reinterpret_cast<TUint16*>(chunkBase + lengthsOffset);
#ifdef _DEBUG
	// The requested length is only functionally needed for IN transfers, but it provides an
	// extra check that the length values that were requested by the user are those that are
	// been requested on the USB stack.
	TInt reqLenOffset = UsbZeroCopyIsocChunkHeader::ReqLenOffset(chunkBase, aHandle);
	TUint16* reqLenPtr = reinterpret_cast<TUint16*>(chunkBase + reqLenOffset);
#endif // _DEBUG
	for (TInt i=0; i < aNumPackets; ++i)
		{
		__ASSERT_ALWAYS(lengthsPtr[usedPackets + i] <= maxPacketSize, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorSavingTooLargeAPacket));
		__ASSERT_DEBUG(lengthsPtr[usedPackets + i] == reqLenPtr[usedPackets + i], UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorRequestedLengthDiffers)); // Belt 'n' Braces
		}

	// Commit the packets to the transfer descriptor.
	UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, aWriteHandle) = aNumPackets;
	TInt headerOffset = UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, aWriteHandle);

	// Return the handle to the next region for writing.
	return (headerOffset == UsbZeroCopyIsocChunkElement::KEndOfList) ? KErrEof : headerOffset;
	}
|
576 |
|
577 /** |
|
578 Used to walk the elements to total up the number of packets that have been saved in the transfer descriptor. |
|
579 */ |
|
580 TInt RUsbZeroCopyTransferStrategy::UsedPackets(TInt aHeaderOffset) |
|
581 { |
|
582 __ASSERT_DEBUG(aHeaderOffset >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorInvalidHeaderOffset)); |
|
583 |
|
584 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
585 TInt elementOffset = UsbZeroCopyIsocChunkHeader::FirstElementOffset(chunkBase, aHeaderOffset); |
|
586 TInt totalNumPackets = 0; |
|
587 while (elementOffset != UsbZeroCopyIsocChunkElement::KEndOfList) |
|
588 { |
|
589 TInt numPackets = UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, elementOffset); |
|
590 if (numPackets == 0 || numPackets == UsbZeroCopyIsocChunkElement::KInvalidElement) |
|
591 { |
|
592 break; |
|
593 } |
|
594 totalNumPackets += numPackets; |
|
595 elementOffset = UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, elementOffset); |
|
596 } |
|
597 return totalNumPackets; |
|
598 } |
|
599 |
|
600 /** |
|
601 Used to read packets out from the transfer descriptor. |
|
602 Note that some of the panics are belt'n'braces, and are used to sanity test result that has been |
|
603 provided. These should be correct (as the results are set by the kernel), however because the user |
|
604 has access to length array (for writing out packets) it is possible for them to 'corrupt' the result. |
|
605 We panic explicitly in UDEB builds, in UREL the guards are not present and the user may get returned |
|
606 a bad descriptor. |
|
607 */ |
|
608 TPtrC8 RUsbZeroCopyTransferStrategy::Packets(TInt aHandle, TInt aFirstPacketIndex, TInt aNumPacketsRequested, TInt& aNumPacketsReturned) const |
|
609 { |
|
610 __ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle)); |
|
611 __ASSERT_ALWAYS(aFirstPacketIndex >= 0, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorPacketNotInBounds)); |
|
612 __ASSERT_ALWAYS(aNumPacketsRequested > 0, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorTooFewPacketsRequested)); |
|
613 |
|
614 TUint8* chunkBase = iChunk.Base() + iBaseOffset; |
|
615 |
|
616 __ASSERT_ALWAYS(aNumPacketsRequested <= UsbZeroCopyIsocChunkHeader::MaxNumPackets(chunkBase, aHandle), |
|
617 UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorTooManyPacketsRequested)); |
|
618 |
|
619 #ifdef _DEBUG |
|
620 const TUint32 pageAddrBits = iPageSize-1; |
|
621 const TUint32 pageTableMask = ~pageAddrBits; |
|
622 #endif // _DEBUG |
|
623 const TInt maxPacketSize = UsbZeroCopyIsocChunkHeader::MaxPacketSize(chunkBase, aHandle); |
|
624 |
|
625 TInt elementOffset = UsbZeroCopyIsocChunkHeader::FirstElementOffset(chunkBase, aHandle); |
|
626 TInt packetCount = 0; |
|
627 while (elementOffset != UsbZeroCopyIsocChunkElement::KEndOfList) |
|
628 { |
|
629 TInt numPackets = UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, elementOffset); |
|
630 if (numPackets == 0 || numPackets == UsbZeroCopyIsocChunkElement::KInvalidElement) |
|
631 { |
|
632 // We've got to the end of the elements and not found the packets we are after. |
|
633 break; |
|
634 } |
|
635 TInt previousPacketCount = packetCount; |
|
636 packetCount += numPackets; |
|
637 if (aFirstPacketIndex < packetCount) // If true then start packet must be in this element |
|
638 { |
|
639 TInt intraElementIndex = aFirstPacketIndex - previousPacketCount; |
|
640 TInt maxPacketsForReturn = packetCount - aFirstPacketIndex; |
|
641 |
|
642 TInt lengthsOffset = UsbZeroCopyIsocChunkHeader::LengthsOffset(chunkBase, aHandle); |
|
643 TUint16* lengthsPtr = reinterpret_cast<TUint16*>(chunkBase + lengthsOffset + previousPacketCount * sizeof(TUint16)); |
|
644 TInt reqLenOffset = UsbZeroCopyIsocChunkHeader::ReqLenOffset(chunkBase, aHandle); |
|
645 TUint16* reqLenPtr = reinterpret_cast<TUint16*>(chunkBase + reqLenOffset + previousPacketCount * sizeof(TUint16)); |
|
646 |
|
647 aNumPacketsReturned = (aNumPacketsRequested < maxPacketsForReturn) ? aNumPacketsRequested : maxPacketsForReturn; |
|
648 |
|
649 TInt distanceToReqPacket = 0; |
|
650 for (TInt i=0; i < intraElementIndex; ++i) |
|
651 { |
|
652 TUint16 reqLen = reqLenPtr[i]; |
|
653 __ASSERT_DEBUG(reqLen <= maxPacketSize, |
|
654 UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorReceivedTooLargeAPacket)); // Belt'n'Braces |
|
655 distanceToReqPacket += reqLen; |
|
656 } |
|
657 TInt dataOffset = UsbZeroCopyIsocChunkElement::DataOffset(chunkBase, elementOffset); |
|
658 TUint8* dataPtr = chunkBase + dataOffset + distanceToReqPacket; |
|
659 |
|
660 TInt totalLengthPackets = 0; |
|
661 for (TInt i=0; i < aNumPacketsReturned; ++i) |
|
662 { |
|
663 TUint16 len = lengthsPtr[intraElementIndex + i]; |
|
664 TUint16 reqLen = reqLenPtr[intraElementIndex + i]; |
|
665 __ASSERT_DEBUG(len <= maxPacketSize, |
|
666 UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorReceivedTooLargeAPacket)); // Belt'n'Braces |
|
667 |
|
668 totalLengthPackets += len; |
|
669 |
|
670 // Here we handle the potential gaps that may appear in the data stream if a short |
|
671 // packet is received. |
|
672 if (len < reqLen) |
|
673 { |
|
674 // if here then we received a short packet, as such we can only return up to here |
|
675 aNumPacketsReturned = i+1; |
|
676 break; |
|
677 } |
|
678 // Otherwise we expect them to be equal (if we got more than requested then something odd has happened. |
|
679 __ASSERT_DEBUG(len == reqLen, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorRequestedLengthDiffers)); // Belt 'n' Braces |
|
680 } |
|
681 |
|
682 // The USB stack requires isoc transfer to be limited to a page (not allowed to cross the boundary). |
|
683 // Therefore one of our elements must have data only on one page. |
|
684 #ifdef _DEBUG |
|
685 TUint32 dataAddr = reinterpret_cast<TUint32>(dataPtr); |
|
686 TBool samePage = (totalLengthPackets == 0) || (pageTableMask & dataAddr) == (pageTableMask & (dataAddr + totalLengthPackets - 1)); |
|
687 __ASSERT_DEBUG(samePage, UsbdiUtils::Panic(UsbdiPanics::EIsocTransferResultCrossesPageBoundary)); // Belt'n'Braces |
|
688 #endif // _DEBUG |
|
689 |
|
690 return TPtrC8(dataPtr, totalLengthPackets); |
|
691 } |
|
692 |
|
693 // No luck so far, move on to try the next element |
|
694 elementOffset = UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, elementOffset); |
|
695 } |
|
696 |
|
697 // No suitable packet range found. |
|
698 aNumPacketsReturned = 0; |
|
699 return TPtrC8(NULL, 0); |
|
700 } |
|
701 |
|
/**
Prepares the isochronous transfer descriptor identified by aHandle to receive
aNumPackets packets, distributing the packets across the descriptor's chunk
elements so that no element's data region crosses a page boundary (the USB
stack requires isochronous transfers not to span a page).

@param aHandle     Metadata handle of the transfer descriptor within the chunk.
@param aNumPackets Number of packets to prepare for; must be greater than zero
                   and no more than the descriptor's maximum packet count.
*/
void RUsbZeroCopyTransferStrategy::ReceivePackets(TInt aHandle, TInt aNumPackets)
	{
	__ASSERT_DEBUG(aHandle >= 0, UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorBadHandle));
	__ASSERT_ALWAYS(aNumPackets > 0, UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorTooFewPacketsRequested));

	// All metadata offsets below are relative to this descriptor region of the chunk.
	TUint8* chunkBase = iChunk.Base() + iBaseOffset;

	__ASSERT_ALWAYS(aNumPackets <= UsbZeroCopyIsocChunkHeader::MaxNumPackets(chunkBase, aHandle),
		UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorTooManyPacketsRequested));

	// Masks for identifying the page an address lies on
	// (assumes iPageSize is a power of two — standard for MMU page sizes).
	const TUint32 pageAddrBits = iPageSize-1;
	const TUint32 pageTableMask = ~pageAddrBits;
	const TInt maxPacketSize = UsbZeroCopyIsocChunkHeader::MaxPacketSize(chunkBase, aHandle);

#ifdef _DEBUG
	// Here we make the best check we can that the user has set-up the requested lengths they require.
	// If there is a difference, they have either a corrupted metadata chunk, or they are reusing a
	// previous buffer without setting the lengths requested.
	TInt lengthsOffset = UsbZeroCopyIsocChunkHeader::LengthsOffset(chunkBase, aHandle);
	TUint16* lengthsPtr = reinterpret_cast<TUint16*>(chunkBase + lengthsOffset);
	TInt reqLenOffset = UsbZeroCopyIsocChunkHeader::ReqLenOffset(chunkBase, aHandle);
	TUint16* reqLenPtr = reinterpret_cast<TUint16*>(chunkBase + reqLenOffset);
	for (TInt i=0; i < aNumPackets; ++i)
		{
		__ASSERT_DEBUG(lengthsPtr[i] == reqLenPtr[i],
			UsbdiUtils::Panic(UsbdiPanics::ETransferDescriptorRequestedLengthDiffers)); // Belt 'n' Braces
		}
#endif // _DEBUG

	// Walk the element list, assigning to each element as many whole packets as
	// fit before its data would cross a page boundary.
	TInt elementOffset = UsbZeroCopyIsocChunkHeader::FirstElementOffset(chunkBase, aHandle);
	while (aNumPackets)
		{
		__ASSERT_DEBUG(elementOffset != UsbZeroCopyIsocChunkElement::KEndOfList,
			UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorUnexpectedEndOfIsocList));

		// Worst-case size if all remaining packets landed in this element.
		TInt totalMaxSize = aNumPackets * maxPacketSize;

		TInt dataOffset = UsbZeroCopyIsocChunkElement::DataOffset(chunkBase, elementOffset);
		TUint8* dataPtr = chunkBase + dataOffset;
		TUint32 dataAddr = reinterpret_cast<TUint32>(dataPtr);
		// If everything fits on this element's page take it all; otherwise take only
		// the whole packets that fit in the remainder of the current page.
		TBool samePage = (pageTableMask & dataAddr) == (pageTableMask & (dataAddr + totalMaxSize));
		TInt allowableSize = samePage ? totalMaxSize : iPageSize - (pageAddrBits & dataAddr);
		TInt numPackets = allowableSize / maxPacketSize;

		// TODO We could assert here in debug as a double check using UsedPackets()

		__ASSERT_DEBUG(numPackets > 0,
			UsbdiUtils::Fault(UsbdiFaults::EUsbTransferDescriptorUnfillableElement));

		UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, elementOffset) = numPackets;
		aNumPackets -= numPackets;

		elementOffset = UsbZeroCopyIsocChunkElement::NextElementOffset(chunkBase, elementOffset);
		}

	// Terminate the used portion of the element list so that readers stop at the
	// last element filled above.
	if (elementOffset != UsbZeroCopyIsocChunkElement::KEndOfList)
		{
		UsbZeroCopyIsocChunkElement::NumPackets(chunkBase, elementOffset) = UsbZeroCopyIsocChunkElement::KInvalidElement; // Mark as invalid.
		}
	}
|
762 |
|
763 |
|
764 |
|
765 |
|
766 |
|
767 TPtr8 RUsbZeroCopyTransferStrategy::IntrWritableBuffer(TInt aHandle) |
|
768 { |
|
769 return WritableBuffer(aHandle); |
|
770 } |
|
771 |
|
772 void RUsbZeroCopyTransferStrategy::IntrSaveData(TInt aHandle, TInt aLength) |
|
773 { |
|
774 SaveData(aHandle, aLength); |
|
775 } |
|
776 |
|
777 void RUsbZeroCopyTransferStrategy::IntrSetZlpStatus(TInt aHandle, RUsbTransferDescriptor::TZlpStatus aZlpStatus) |
|
778 { |
|
779 SetZlpStatus(aHandle, aZlpStatus); |
|
780 } |
|
781 |
|
782 TPtrC8 RUsbZeroCopyTransferStrategy::IntrBuffer(TInt aHandle) const |
|
783 { |
|
784 return Buffer(aHandle); |
|
785 } |
|
786 |
|
787 TPtr8 RUsbZeroCopyTransferStrategy::BulkWritableBuffer(TInt aHandle) |
|
788 { |
|
789 return WritableBuffer(aHandle); |
|
790 } |
|
791 |
|
792 void RUsbZeroCopyTransferStrategy::BulkSaveData(TInt aHandle, TInt aLength) |
|
793 { |
|
794 SaveData(aHandle, aLength); |
|
795 } |
|
796 |
|
797 void RUsbZeroCopyTransferStrategy::BulkSetZlpStatus(TInt aHandle, RUsbTransferDescriptor::TZlpStatus aZlpStatus) |
|
798 { |
|
799 SetZlpStatus(aHandle, aZlpStatus); |
|
800 } |
|
801 |
|
802 TPtrC8 RUsbZeroCopyTransferStrategy::BulkBuffer(TInt aHandle) const |
|
803 { |
|
804 return Buffer(aHandle); |
|
805 } |
|
806 |
|
807 void RUsbZeroCopyTransferStrategy::IsocReset(TInt aHandle) |
|
808 { |
|
809 Reset(aHandle); |
|
810 } |
|
811 |
|
812 TPacketLengths RUsbZeroCopyTransferStrategy::IsocLengths(TInt aHandle) |
|
813 { |
|
814 return Lengths(aHandle); |
|
815 } |
|
816 |
|
817 TPacketResults RUsbZeroCopyTransferStrategy::IsocResults(TInt aHandle) |
|
818 { |
|
819 return Results(aHandle); |
|
820 } |
|
821 |
|
822 TInt RUsbZeroCopyTransferStrategy::IsocMaxPacketSize(TInt aHandle) |
|
823 { |
|
824 return MaxPacketSize(aHandle); |
|
825 } |
|
826 |
|
827 TPtr8 RUsbZeroCopyTransferStrategy::IsocWritablePackets(TInt aHandle, TInt aWriteHandle, TInt aNumPacketsRequested, TInt& aMaxNumPacketsAbleToWrite) |
|
828 { |
|
829 return WritablePackets(aHandle, aWriteHandle, aNumPacketsRequested, aMaxNumPacketsAbleToWrite); |
|
830 } |
|
831 |
|
832 TInt RUsbZeroCopyTransferStrategy::IsocSaveMultiple(TInt aHandle, TInt aWriteHandle, TInt aNumOfPackets) |
|
833 { |
|
834 return SaveMultiple(aHandle, aWriteHandle, aNumOfPackets); |
|
835 } |
|
836 |
|
837 TPtrC8 RUsbZeroCopyTransferStrategy::IsocPackets(TInt aHandle, TInt aFirstPacketIndex, TInt aNumPacketsRequested, TInt& aNumPacketsReturned) const |
|
838 { |
|
839 return Packets(aHandle, aFirstPacketIndex, aNumPacketsRequested, aNumPacketsReturned); |
|
840 } |
|
841 |
|
842 void RUsbZeroCopyTransferStrategy::IsocReceivePackets(TInt aHandle, TInt aNumOfPackets) |
|
843 { |
|
844 ReceivePackets(aHandle, aNumOfPackets); |
|
845 } |
|
846 |
|
847 |
|
848 //Calculate-alignment related methods |
|
849 |
|
850 /** |
|
851 Scan through all the bulk/interrupt endpoints associated with the particular interface |
|
852 (and all its alternate settings) to find the maximum bMaxPacketSize across all of these. |
|
853 For Interrupt, if there is EP of which the maxPacketSize is not power of 2, |
|
854 the maxmaxpaceketsize will be assigned the first maxPacketSize which is not power of 2. |
|
855 */ |
|
856 TInt RUsbZeroCopyTransferStrategy::GetMaximumMaxPacketSize(TInt& aMaxMaxBulk, TInt& aMaxMaxInterrupt) |
|
857 { |
|
858 TUsbInterfaceDescriptor interfaceDesc; |
|
859 TInt err = iInterfaceHandle->GetInterfaceDescriptor(interfaceDesc); |
|
860 if (KErrNone != err) |
|
861 { |
|
862 return err; |
|
863 } |
|
864 |
|
865 const TUint8 KEPTransferTypeBulk = 0x02; |
|
866 const TUint8 KEPTransferTypeInterrupt = 0x03; |
|
867 const TUint8 KEPTransferTypeMask = 0x03; |
|
868 |
|
869 TBool ignoreInterruptEP = EFalse; |
|
870 //Traverse all related interface alternate settings |
|
871 TUsbGenericDescriptor* descriptor = &interfaceDesc; |
|
872 while (descriptor) |
|
873 { |
|
874 TUsbInterfaceDescriptor* interface = TUsbInterfaceDescriptor::Cast(descriptor); |
|
875 |
|
876 if (interface) |
|
877 { |
|
878 //Traverse all endpoint descriptor in the interface |
|
879 TUsbGenericDescriptor* subDescriptor = interface->iFirstChild; |
|
880 |
|
881 while (subDescriptor) |
|
882 { |
|
883 TUsbEndpointDescriptor* endpoint = TUsbEndpointDescriptor::Cast(subDescriptor); |
|
884 |
|
885 if (endpoint) |
|
886 { |
|
887 TBool isBulkEP = ((endpoint->Attributes() & KEPTransferTypeMask) == KEPTransferTypeBulk); |
|
888 TBool isInterruptEP = ((endpoint->Attributes() & KEPTransferTypeMask) == KEPTransferTypeInterrupt); |
|
889 TUint maxPacketSize = endpoint->MaxPacketSize(); |
|
890 |
|
891 //Caculate the maximum maxPacketSize |
|
892 if (isBulkEP) |
|
893 { |
|
894 if (maxPacketSize > aMaxMaxBulk) |
|
895 { |
|
896 aMaxMaxBulk = maxPacketSize; |
|
897 } |
|
898 } |
|
899 else if(isInterruptEP && !ignoreInterruptEP) |
|
900 { |
|
901 if (!IsPowerOfTwo(maxPacketSize)) |
|
902 { |
|
903 aMaxMaxInterrupt = maxPacketSize; |
|
904 ignoreInterruptEP = ETrue; |
|
905 } |
|
906 |
|
907 if (maxPacketSize > aMaxMaxInterrupt) |
|
908 { |
|
909 aMaxMaxInterrupt = maxPacketSize; |
|
910 } |
|
911 } |
|
912 } |
|
913 |
|
914 subDescriptor = subDescriptor->iNextPeer; |
|
915 } |
|
916 } |
|
917 |
|
918 descriptor = descriptor->iNextPeer; |
|
919 } |
|
920 |
|
921 return KErrNone; |
|
922 } |
|
923 |
|
924 /** |
|
925 Calculate the additional alignment requirement on bulk and interrupt transfer. |
|
926 For Bulk transfer, |
|
927 Scan through all the bulk/interrupt endpoints associated with the particular interface |
|
928 to find the maximum wMaxPacketSize across all of these. The new alignment for the transfer |
|
929 is the maximum between the maximum bMaxPacketSize and the original alignment |
|
930 For Interrupt transfer, |
|
931 Check if there is endpoints of which the wMaxPacketSize is not power of 2, |
|
932 if no, do the same as bulk; |
|
933 if yes, the size of transfer data is limited to one page size, and the additional alignment |
|
934 calcualted to make the transfer data not to span page boundary |
|
935 |
|
936 */ |
|
937 TInt RUsbZeroCopyTransferStrategy::CaculateAdditionalAlignment(TInt aCurrentOffset, TInt aMaxMaxBulk, TInt aMaxMaxInterrupt, TUsbTransferDescriptorDetails& aTransferDetails) |
|
938 { |
|
939 RUsbTransferDescriptor::TTransferType transferType = aTransferDetails.iTransferDesc.iType; |
|
940 TBool isBulkTransfer = (transferType == RUsbTransferDescriptor::EBulk); |
|
941 TBool isInterruptTransfer = (transferType == RUsbTransferDescriptor::EInterrupt); |
|
942 |
|
943 if (isBulkTransfer) |
|
944 { |
|
945 if (aMaxMaxBulk > aTransferDetails.iRequiredAlignment) |
|
946 { |
|
947 aTransferDetails.iRequiredAlignment = aMaxMaxBulk; |
|
948 } |
|
949 } |
|
950 else if (isInterruptTransfer) |
|
951 { |
|
952 if (IsPowerOfTwo(aMaxMaxInterrupt)) |
|
953 { |
|
954 if (aMaxMaxInterrupt > aTransferDetails.iRequiredAlignment) |
|
955 { |
|
956 aTransferDetails.iRequiredAlignment = aMaxMaxInterrupt; |
|
957 } |
|
958 } |
|
959 else |
|
960 { |
|
961 if (aTransferDetails.iRequiredSize > iPageSize) |
|
962 { |
|
963 //The transfer data can not span the page boundary |
|
964 //if there is EP of which wMaxPacketSize is not power-of-2, |
|
965 return KErrNotSupported; |
|
966 } |
|
967 else |
|
968 { |
|
969 TInt sizeLeftOfCurrentPage = IncNeededToAlign(aCurrentOffset,iPageSize); |
|
970 TInt alignPad = IncNeededToAlign(aCurrentOffset, aTransferDetails.iRequiredAlignment); |
|
971 |
|
972 //The transfer data can't fit into the current page |
|
973 //Align the trasfer data to the next page |
|
974 if ( sizeLeftOfCurrentPage < (alignPad + aTransferDetails.iRequiredSize) ) |
|
975 { |
|
976 aTransferDetails.iRequiredAlignment = iPageSize; |
|
977 } |
|
978 } |
|
979 } |
|
980 } |
|
981 return KErrNone; |
|
982 } |