1 /* |
|
2 * Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of the License "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: |
|
15 * |
|
16 */ |
|
#include "cxml_internal.h"

#include <stddef.h>

#include <xml/cxml/nw_tinytree.h>
#include "nw_tinytree_alloc.h"
|
20 |
|
21 /* |
|
22 * This is not a generalized allocator. It is intended to support |
|
23 * dynamic extension of the node array or storage buffers associated |
|
24 * with a tiny tree. The goal of the design is to provide a kind of |
|
25 * virtual array whose storage can be allocated in several |
|
26 * non-contiguous segments located anywhere in memory. Since there |
|
27 * will be gaps between segments and segments may be allocated in |
|
28 * out-of-order locations (for example a second segment may be |
|
29 * allocated at an address lower than the first segment) simple |
|
30 * pointer arithmetic and array indexing cannot be used to address |
|
31 * array elements. However, rather than trying to provide a totally |
|
32 * general non-contiguous array package here, certain limitations have |
|
33 * been imposed. These simplify the implementation but mean that this |
|
34 * module can only be used with certain constraints. These constraints |
|
35 * are not currently a problem for the tiny dom parser, but any change |
|
36 * in the way the parser uses this module must be done with extreme |
|
37 * care. Eventually, we may want to generalize this package if this |
|
 * can be done without adding too much to the memory footprint or computing
 *
 * burden.
|
40 * |
|
41 * The main constraint is that any code which writes to or reads from |
|
42 * a dynamically extended array must be sure that operations involving |
|
43 * ordinary pointer arithmetic and array indexing always occur within |
|
 * the boundaries of a single segment. Operations that may result in
|
45 * crossing a segment boundary must use the supplied address and |
|
46 * offset increment methods (which can be thought of as operator |
|
47 * overloads for the += operator). Furthermore, it needs to be |
|
48 * understood that any increment which results in crossing a segment |
|
49 * boundary may result in the allocation of a new segment if the |
|
50 * resulting address is not within an existing segment. When an |
|
51 * address or offset is incremented to a new segment the result will |
|
52 * be adjusted to the new segment and may have an unexpected value |
|
53 * (for example, incrementing address x by a positive increment may |
|
54 * result in an address that is less than x.). One important rule is |
|
55 * that the result of incrementing x by some value i is not guaranteed |
|
56 * to be idempotent if the increment crosses a segment boundary: i.e |
|
57 * addressIncrement(x,i) == addressIncrement(x,i) is not guaranteed to |
|
58 * be true. Future references to an address that results from an |
|
59 * increment operations must always use the result of the |
|
60 * operation. So, for example, (j = addressIncrement(x,i)) == |
|
61 * addressIncrement(x,x+j) is guaranteed to be true. |
|
62 * |
|
63 * Segment storage addresses are always padded to align with the size |
|
64 * of the data object to be stored. This means that segments allocated |
|
 * for a specific object type must be treated as arrays of that
|
66 * object type (or as arrays of bytes). |
|
67 * |
|
68 * The parser code always uses this module according to these rules. |
|
69 * Specifically, no tree node crosses a segment boundary and no |
|
70 * parsable fragment of wbxml (a fragment which the parser can |
|
 * complete) ever crosses a segment boundary. This latter condition
|
72 * allows the parser to treat its current buffer as a simple array of |
|
73 * bytes. Another rule is that all of the offsets stored in tree |
|
74 * nodes are guaranteed to address an allocated segment. For example, |
|
75 * when writing a node, the storage offset is incremented using the |
|
76 * offset increment * operation. The resulting offsets (stored in |
|
77 * nodes) can then be safely used to address the written-to storage. |
|
78 * |
|
79 */ |
|
80 |
|
81 /* |
|
82 * Allocate segments for buffer and node array storage. The base |
|
83 * segment address is the one from which all relative offsets are |
|
84 * calculated. Segments are probably not contiguous, and, given that |
|
85 * new segments might be allocated anywhere relative to existing |
|
86 * segments, the offset must be a relative one. This limits the |
|
87 * maximum relative offset to the beginning of a new segment to be a |
|
88 * signed integer of the offset type. |
|
89 */ |
|
90 |
|
91 #include <limits.h> |
|
92 #define MAX_REL_OFFSET INT_MAX |
|
93 |
|
94 static |
|
95 NW_TinyTree_SegHeader_t* |
|
96 NW_TinyTree_findLastSegment(NW_TinyTree_SegHeader_t *base){ |
|
97 NW_TinyTree_SegHeader_t *last_seg = base; |
|
98 while(last_seg->next != 0) |
|
99 { |
|
100 last_seg = (NW_TinyTree_SegHeader_t*)((NW_Byte*)base + last_seg->next); |
|
101 } |
|
102 return last_seg; |
|
103 } |
|
104 |
|
105 |
|
106 NW_TinyTree_Offset_t |
|
107 NW_TinyTree_segmentGetFreeSpace(NW_TinyTree_SegHeader_t *segment){ |
|
108 return (NW_TinyTree_Offset_t)(segment->size-segment->free_offset); |
|
109 } |
|
110 |
|
111 |
|
112 NW_TinyTree_Offset_t |
|
113 NW_TinyTree_getFreeStorageSpace(NW_TinyTree_SegHeader_t *base){ |
|
114 NW_TinyTree_SegHeader_t *last_seg = NW_TinyTree_findLastSegment(base); |
|
115 return NW_TinyTree_segmentGetFreeSpace(last_seg); |
|
116 } |
|
117 |
|
118 NW_TinyTree_SegHeader_t* |
|
119 NW_TinyTree_addSegment(NW_TinyTree_SegHeader_t *base, |
|
120 NW_TinyTree_Offset_t size){ |
|
121 NW_TinyTree_SegHeader_t *new_seg; |
|
122 NW_TinyTree_SegHeader_t *last_seg = NW_TinyTree_findLastSegment(base); |
|
123 NW_Int32 offset; |
|
124 |
|
125 /* The extra node is added to make sure we have space to pad the segment storage to an even node boundary */ |
|
126 NW_Uint32 alloc_size = size + sizeof(NW_TinyTree_SegHeader_t) + sizeof(NW_TinyTree_Node_t); |
|
127 new_seg = (NW_TinyTree_SegHeader_t*)NW_Mem_Malloc(alloc_size); |
|
128 |
|
129 if(new_seg != 0){ |
|
130 offset = (NW_Byte*)new_seg - (NW_Byte*)base; |
|
131 if(abs(offset) > MAX_REL_OFFSET){ |
|
132 NW_Mem_Free(new_seg); |
|
133 return 0; |
|
134 } |
|
135 NW_Mem_memset(new_seg, 0, alloc_size); |
|
136 /* Shift the storage pointer to an even boundary of a node so we can use this as an array of nodes */ |
|
137 new_seg->initializer = base->initializer; |
|
138 last_seg->next = (NW_TinyTree_RelativeOffset_t)offset; |
|
139 new_seg->size = size; |
|
140 new_seg->free_offset = 0; |
|
141 new_seg->storage = (NW_Uint8*) |
|
142 (((((NW_Uint32) new_seg) + sizeof(NW_TinyTree_SegHeader_t) - 1) |
|
143 / sizeof (NW_TinyTree_Node_t) + 1) |
|
144 * sizeof (NW_TinyTree_Node_t)); |
|
145 if(new_seg->initializer) |
|
146 (*(new_seg->initializer))(new_seg->storage, size); |
|
147 } |
|
148 return new_seg; |
|
149 } |
|
150 |
|
151 /* |
|
152 * Free segments allocated by addSegment only. |
|
153 */ |
|
154 |
|
155 void |
|
156 NW_TinyTree_freeSegments(NW_TinyTree_SegHeader_t *base){ |
|
157 NW_TinyTree_SegHeader_t *lastSegment = NULL; |
|
158 NW_TinyTree_SegHeader_t *current = base; |
|
159 NW_TinyTree_SegHeader_t *previous = NULL; |
|
160 |
|
161 NW_Uint16 totalAdditionalSegments = 0; |
|
162 NW_Uint16 index = 0; |
|
163 NW_Uint16 i = 0; |
|
164 |
|
165 while (current->next != 0) |
|
166 { |
|
167 totalAdditionalSegments++; |
|
168 current = (NW_TinyTree_SegHeader_t*)((NW_Byte*)base + current->next); |
|
169 } |
|
170 lastSegment = current; |
|
171 |
|
172 while(index< totalAdditionalSegments) |
|
173 { |
|
174 current = base; |
|
175 i = 0; |
|
176 while (i < (totalAdditionalSegments - index)) |
|
177 { |
|
178 previous = current; |
|
179 current = (NW_TinyTree_SegHeader_t*)((NW_Byte*)base + current->next); |
|
180 i++; |
|
181 } |
|
182 NW_Mem_Free(lastSegment); |
|
183 lastSegment = previous; |
|
184 index++; |
|
185 } |
|
186 |
|
187 } |
|
188 |
|
189 |
|
190 /* |
|
191 * Get the segment header associated with an offset. This allocates a new |
|
192 * segment if the offset is beyond any currently allocated segment. If a new |
|
193 * segment is allocated, the offset is readjusted to the beginning of the new |
|
194 * segment. |
|
195 */ |
|
196 |
|
197 NW_TinyTree_SegHeader_t* |
|
198 NW_TinyTree_addressGetSegment(NW_TinyTree_SegHeader_t *base, |
|
199 NW_Byte **address){ |
|
200 |
|
201 NW_TinyTree_SegHeader_t *segment = base; |
|
202 while(segment != 0){ |
|
203 if ((*address > segment->storage) && (*address < (segment->storage + segment->size))) |
|
204 return segment; |
|
205 if (segment->next == 0){ /* Add new segment */ |
|
206 segment = NW_TinyTree_addSegment(base, segment->size); /* Same size as last segment */ |
|
207 if(segment == 0){ |
|
208 break; |
|
209 } |
|
210 /* |
|
211 * Reset address to beginning of new segment storage. |
|
212 */ |
|
213 *address = segment->storage; |
|
214 return segment; |
|
215 } |
|
216 segment = (NW_TinyTree_SegHeader_t*)((NW_Byte*)base + segment->next); |
|
217 } |
|
218 return 0; |
|
219 } |
|
220 |
|
221 |
|
222 NW_Byte* |
|
223 NW_TinyTree_addressIncrement(NW_TinyTree_SegHeader_t *base, |
|
224 NW_Byte *address, |
|
225 NW_TinyTree_Offset_t delta){ |
|
226 NW_Byte* new_address = address + delta; |
|
227 NW_TinyTree_SegHeader_t *segment = NW_TinyTree_addressGetSegment(base, &new_address); |
|
228 if(new_address >= (segment->storage + segment->free_offset)) /* Haven't touched this memory before */ |
|
229 { |
|
230 segment->free_offset = (NW_TinyTree_Offset_t)(new_address - segment->storage); |
|
231 } |
|
232 return new_address; |
|
233 } |
|
234 |
|
235 /* |
|
236 * Get the segment header associated with an offset. This allocates a new |
|
237 * segment if the offset is beyond any currently allocated segment. If a new |
|
238 * segment is allocated, the offset is readjusted to the beginning of the new |
|
239 * segment. |
|
240 */ |
|
241 |
|
242 NW_TinyTree_SegHeader_t* |
|
243 NW_TinyTree_offsetGetSegment(NW_TinyTree_SegHeader_t *base, |
|
244 NW_TinyTree_Offset_t *offset){ |
|
245 |
|
246 NW_TinyTree_SegHeader_t *segment = base; |
|
247 while(segment != 0){ |
|
248 if (((base->storage + *offset) > segment->storage) && ((base->storage + *offset) < (segment->storage + segment->size))) |
|
249 return segment; |
|
250 if (segment->next == 0){ /* Add new segment */ |
|
251 if (segment->size > MIN_SEGMENT_SIZE) |
|
252 segment = NW_TinyTree_addSegment(base, segment->size); /* Same size as last segment */ |
|
253 else |
|
254 segment = NW_TinyTree_addSegment(base, MIN_SEGMENT_SIZE); |
|
255 if(segment == 0){ |
|
256 break; |
|
257 } |
|
258 /* |
|
259 * Reset offset to beginning of new segment storage. |
|
260 */ |
|
261 *offset = (NW_TinyTree_Offset_t)(segment->storage - base->storage); |
|
262 return segment; |
|
263 } |
|
264 segment = (NW_TinyTree_SegHeader_t*)((NW_Byte*)base + segment->next); |
|
265 } |
|
266 return 0; |
|
267 } |
|
268 |
|
269 NW_TinyTree_Offset_t |
|
270 NW_TinyTree_offsetIncrement(NW_TinyTree_SegHeader_t *base, |
|
271 NW_TinyTree_Offset_t offset, |
|
272 NW_TinyTree_Offset_t delta){ |
|
273 NW_TinyTree_Offset_t new_offset = (NW_TinyTree_Offset_t)(offset + delta); |
|
274 NW_TinyTree_SegHeader_t *segment = NW_TinyTree_offsetGetSegment(base, &new_offset); |
|
275 if(base->storage + new_offset >= (segment->storage + segment->free_offset)) /* Haven't touched this memory before */ |
|
276 { |
|
277 segment->free_offset = (NW_TinyTree_Offset_t)(base->storage + new_offset - segment->storage); |
|
278 } |
|
279 return new_offset; |
|
280 } |
|
281 |
|
282 |
|
283 |
|
284 |
|
285 |
|
286 |
|
287 |
|
288 |
|
289 |
|
290 |
|
291 |
|