|
1 /* |
|
2 * Copyright (c) 2007 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: Cache handling routines |
|
15 * |
|
16 */ |
|
17 |
|
18 |
|
19 #include "appdep.hpp" |
|
20 |
|
21 |
|
22 // ---------------------------------------------------------------------------------------------------------- |
|
23 |
|
24 void ReadDataFromSymbolTablesCache(target& a_target) |
|
25 { |
|
26 string line; |
|
27 |
|
28 ifstream symtabcachef(a_target.st_cache_path.c_str()); |
|
29 if (!symtabcachef.is_open()) |
|
30 { |
|
31 cerr << "Warning: Regenerating corrupted cache of target " << a_target.name << endl; |
|
32 a_target.cache_files_valid = false; |
|
33 _some_cache_needs_update = true; |
|
34 return; |
|
35 } |
|
36 |
|
37 getline(symtabcachef, line); |
|
38 if (line != CACHE_ST_HEADER) |
|
39 { |
|
40 cerr << "Warning: Regenerating corrupted cache of target " << a_target.name << endl; |
|
41 a_target.cache_files_valid = false; |
|
42 _some_cache_needs_update = true; |
|
43 } |
|
44 else |
|
45 { |
|
46 // loop through all lines in the file |
|
47 while(!symtabcachef.eof()) |
|
48 { |
|
49 getline(symtabcachef, line); |
|
50 |
|
51 if (line.length() > 0 && line[0] != CACHE_COMMENT_CHAR) |
|
52 { |
|
53 // first entry is the directory|filename|modification_time|symbol_table_size , eg |
|
54 // x:\epoc32\release\armv5\urel\|avkon.dll|1160666488|178 |
|
55 string::size_type delim1 = line.find(CACHE_SEP, 0); |
|
56 string::size_type delim2 = line.find(CACHE_SEP, delim1+1); |
|
57 string::size_type delim3 = line.find(CACHE_SEP, delim2+1); |
|
58 |
|
59 import_library_info lib_info; |
|
60 lib_info.directory = line.substr(0, delim1); |
|
61 lib_info.filename = line.substr(delim1+1, delim2-delim1-1); |
|
62 lib_info.mod_time = Str2Int( line.substr(delim2+1, delim3-delim2-1) ); |
|
63 unsigned int symbol_table_size = Str2Int( line.substr(delim3+1, line.length()-delim3-1) ); |
|
64 |
|
65 // get symbol table |
|
66 vector<string> symbol_table; |
|
67 for (unsigned int j=0; j<symbol_table_size; j++) |
|
68 { |
|
69 getline(symtabcachef, line); |
|
70 symbol_table.push_back( line ); |
|
71 } |
|
72 |
|
73 lib_info.symbol_table = symbol_table; |
|
74 |
|
75 // append binary info to the vectory |
|
76 a_target.import_libraries.push_back( lib_info ); |
|
77 } |
|
78 } |
|
79 |
|
80 // check that the last line of the file contains the footer of the cache |
|
81 if (line != CACHE_FOOTER) |
|
82 { |
|
83 cerr << "Warning: Regenerating corrupted cache of target " << a_target.name << endl; |
|
84 a_target.import_libraries.clear(); |
|
85 a_target.cache_files_valid = false; |
|
86 _some_cache_needs_update = true; |
|
87 } |
|
88 } |
|
89 |
|
90 symtabcachef.close(); |
|
91 } |
|
92 |
|
93 // ---------------------------------------------------------------------------------------------------------- |
|
94 |
|
95 void ReadDataFromDependenciesCache(target& a_target) |
|
96 { |
|
97 string line; |
|
98 |
|
99 // read data from the dependencies cache file |
|
100 ifstream depcachef(a_target.dep_cache_path.c_str()); |
|
101 if (!depcachef.is_open()) |
|
102 { |
|
103 cerr << "Warning: Regenerating corrupted cache of target " << a_target.name << endl; |
|
104 a_target.cache_files_valid = false; |
|
105 _some_cache_needs_update = true; |
|
106 return; |
|
107 } |
|
108 |
|
109 getline(depcachef, line); |
|
110 if (line != CACHE_DEP_HEADER) |
|
111 { |
|
112 cerr << "Warning: Regenerating corrupted cache of target " << a_target.name << endl; |
|
113 a_target.cache_files_valid = false; |
|
114 _some_cache_needs_update = true; |
|
115 } |
|
116 else |
|
117 { |
|
118 // loop through all lines in the file |
|
119 while(!depcachef.eof()) |
|
120 { |
|
121 getline(depcachef, line); |
|
122 |
|
123 if (line.length() > 0 && line[0] != CACHE_COMMENT_CHAR) |
|
124 { |
|
125 // first entry is the directory|filename|binary_format|uid1|uid2|uid3|secureid|vendorid|capabilities|min_heap_size|max_heap_size|stack_size|modification_time|number_of_dependencies , eg |
|
126 // x:\epoc32\release\armv5\urel\|about.exe|EPOC Exe for ARMV4 CPU|0x1000007a|0x100039ce|0x10005a22|0x10005a22|0x101fb657|782384|4096|1048576|8192|1160666488|11 |
|
127 string::size_type delim1 = line.find(CACHE_SEP, 0); |
|
128 string::size_type delim2 = line.find(CACHE_SEP, delim1+1); |
|
129 string::size_type delim3 = line.find(CACHE_SEP, delim2+1); |
|
130 string::size_type delim4 = line.find(CACHE_SEP, delim3+1); |
|
131 string::size_type delim5 = line.find(CACHE_SEP, delim4+1); |
|
132 string::size_type delim6 = line.find(CACHE_SEP, delim5+1); |
|
133 string::size_type delim7 = line.find(CACHE_SEP, delim6+1); |
|
134 string::size_type delim8 = line.find(CACHE_SEP, delim7+1); |
|
135 string::size_type delim9 = line.find(CACHE_SEP, delim8+1); |
|
136 string::size_type delim10 = line.find(CACHE_SEP, delim9+1); |
|
137 string::size_type delim11 = line.find(CACHE_SEP, delim10+1); |
|
138 string::size_type delim12 = line.find(CACHE_SEP, delim11+1); |
|
139 string::size_type delim13 = line.find(CACHE_SEP, delim12+1); |
|
140 |
|
141 binary_info b_info; |
|
142 b_info.directory = line.substr(0, delim1); |
|
143 b_info.filename = line.substr(delim1+1, delim2-delim1-1); |
|
144 b_info.binary_format = line.substr(delim2+1, delim3-delim2-1); |
|
145 b_info.uid1 = line.substr(delim3+1, delim4-delim3-1); |
|
146 b_info.uid2 = line.substr(delim4+1, delim5-delim4-1); |
|
147 b_info.uid3 = line.substr(delim5+1, delim6-delim5-1); |
|
148 b_info.secureid = line.substr(delim6+1, delim7-delim6-1); |
|
149 b_info.vendorid = line.substr(delim7+1, delim8-delim7-1); |
|
150 b_info.capabilities = Str2Int( line.substr(delim8+1, delim9-delim8-1) ); |
|
151 b_info.min_heap_size = Str2Int( line.substr(delim9+1, delim10-delim9-1) ); |
|
152 b_info.max_heap_size = Str2Int( line.substr(delim10+1, delim11-delim10-1) ); |
|
153 b_info.stack_size = Str2Int( line.substr(delim11+1, delim12-delim11-1) ); |
|
154 b_info.mod_time = Str2Int( line.substr(delim12+1, delim13-delim12-1) ); |
|
155 unsigned int number_of_deps = Str2Int( line.substr(delim13+1, line.length()-delim13-1) ); |
|
156 |
|
157 vector<dependency> deps; |
|
158 for (unsigned int j=0; j<number_of_deps; j++) |
|
159 { |
|
160 getline(depcachef, line); |
|
161 |
|
162 // second type entry is filename|number_of_imports , eg |
|
163 // APPARC.DLL|6 |
|
164 string::size_type delim1 = line.find(CACHE_SEP, 0); |
|
165 |
|
166 dependency dep; |
|
167 dep.filename = line.substr(0, delim1); |
|
168 unsigned int number_of_imports = Str2Int( line.substr(delim1+1, line.length()-delim1-1) ); |
|
169 |
|
170 vector<import> imps; |
|
171 for (unsigned int k=0; k<number_of_imports; k++) |
|
172 { |
|
173 getline(depcachef, line); |
|
174 |
|
175 // third type on entry is funcpos|funcname|is_vtable|vtable_offset, eg |
|
176 // 121|CApaDocument::Capability() const|0|0 |
|
177 string::size_type delim1 = line.find(CACHE_SEP, 0); |
|
178 string::size_type delim2 = line.find(CACHE_SEP, delim1+1); |
|
179 string::size_type delim3 = line.find(CACHE_SEP, delim2+1); |
|
180 |
|
181 import imp; |
|
182 imp.funcpos = Str2Int( line.substr(0, delim1) ); |
|
183 imp.funcname = line.substr(delim1+1, delim2-delim1-1); |
|
184 imp.is_vtable = Str2Int( line.substr(delim2+1, delim3-delim2-1) ); |
|
185 imp.vtable_offset = Str2Int( line.substr(delim3+1, line.length()-delim3-1) ); |
|
186 |
|
187 // append to the import info vector |
|
188 imps.push_back( imp ); |
|
189 } |
|
190 |
|
191 // now we have import info too |
|
192 dep.imports = imps; |
|
193 |
|
194 // append to the deps info vector |
|
195 deps.push_back( dep ); |
|
196 |
|
197 } |
|
198 // now we have the dep info too |
|
199 b_info.dependencies = deps; |
|
200 |
|
201 // apppend binary info to the vector |
|
202 a_target.binaries.push_back( b_info ); |
|
203 } |
|
204 } |
|
205 |
|
206 // check that the last line of the file contains the footer of the cache |
|
207 if (line != CACHE_FOOTER) |
|
208 { |
|
209 cerr << "Warning: Regenerating corrupted cache of target " << a_target.name << endl; |
|
210 a_target.binaries.clear(); |
|
211 a_target.cache_files_valid = false; |
|
212 _some_cache_needs_update = true; |
|
213 } |
|
214 } |
|
215 depcachef.close(); |
|
216 } |
|
217 |
|
218 // ---------------------------------------------------------------------------------------------------------- |
|
219 |
|
220 void GetDataFromImportTables(target& a_target) |
|
221 { |
|
222 // read data from import libraries if needed |
|
223 for (unsigned int i=0; i<a_target.lib_files.size(); i++) |
|
224 { |
|
225 bool is_new_file = true; |
|
226 |
|
227 vector<string> symbol_table; |
|
228 |
|
229 if (_cl_print_debug) |
|
230 cerr << "Processing " << a_target.release_lib_dir << a_target.lib_files.at(i) << "..."; |
|
231 |
|
232 // if not generating a clean cache, check if this file was already in the cache |
|
233 if (!_cl_generate_clean_cache) |
|
234 { |
|
235 // first try to find existing file |
|
236 bool update_file = false; |
|
237 int position = 0; |
|
238 |
|
239 for (unsigned int j=0; j<a_target.import_libraries.size(); j++) |
|
240 { |
|
241 // check if names match |
|
242 if (StringICmpFileNamesWithoutExtension(a_target.lib_files.at(i), a_target.import_libraries.at(j).filename) == 0) |
|
243 { |
|
244 // the file was already found from the cache |
|
245 is_new_file = false; |
|
246 |
|
247 // compare modification times |
|
248 struct stat stat_p; |
|
249 stat((a_target.release_lib_dir + a_target.lib_files.at(i)).c_str(), &stat_p); // get new stats |
|
250 |
|
251 if (!TimestampsMatches(a_target.import_libraries.at(j).mod_time, stat_p.st_mtime)) |
|
252 { |
|
253 // time stamps are different so needs to update the file |
|
254 update_file = true; |
|
255 position = j; |
|
256 } |
|
257 |
|
258 // there can't be anymore same names, so break the loop anyway |
|
259 break; |
|
260 } |
|
261 } |
|
262 |
|
263 // get the new data |
|
264 if (update_file) |
|
265 { |
|
266 a_target.cache_files_valid = false; // cache files on disk must be rewritten |
|
267 |
|
268 import_library_info& lib_info = a_target.import_libraries.at(position); |
|
269 lib_info.directory = a_target.release_lib_dir; |
|
270 |
|
271 if (_cl_use_gcc) |
|
272 { |
|
273 GetSymbolTableWithNM(_gcc_nm_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
274 } |
|
275 else if (_cl_use_gcce) |
|
276 { |
|
277 if (_cl_use_libs) |
|
278 { |
|
279 GetSymbolTableWithNM(_gcce_nm_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
280 } |
|
281 else |
|
282 { |
|
283 GetSymbolTableWithReadelf(_gcce_readelf_location, _gcce_cfilt_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
284 } |
|
285 } |
|
286 else if (_cl_use_rvct) |
|
287 { |
|
288 if (_cl_use_libs) |
|
289 { |
|
290 GetSymbolTableWithArmar(_rvct_armar_location, _rvct_cfilt_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
291 } |
|
292 else |
|
293 { |
|
294 GetSymbolTableWithFromelf(_rvct_fromelf_location, _rvct_cfilt_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
295 } |
|
296 } |
|
297 |
|
298 lib_info.symbol_table = symbol_table; |
|
299 |
|
300 // get statistics of the file and set the modification time |
|
301 struct stat stat_p; |
|
302 stat((a_target.release_lib_dir + a_target.lib_files.at(i)).c_str(), &stat_p); |
|
303 lib_info.mod_time = stat_p.st_mtime; |
|
304 |
|
305 // record changed import libraries |
|
306 _changed_import_libraries.push_back( lib_info ); |
|
307 } |
|
308 } |
|
309 |
|
310 // this is a new file, get info and append it to the vector |
|
311 if (is_new_file) |
|
312 { |
|
313 a_target.cache_files_valid = false; // cache files on disk must be rewritten |
|
314 |
|
315 // get the symbol tables of the library |
|
316 if (_cl_use_gcc) |
|
317 { |
|
318 GetSymbolTableWithNM(_gcc_nm_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
319 } |
|
320 else if (_cl_use_gcce) |
|
321 { |
|
322 if (_cl_use_libs) |
|
323 { |
|
324 GetSymbolTableWithNM(_gcce_nm_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
325 } |
|
326 else |
|
327 { |
|
328 GetSymbolTableWithReadelf(_gcce_readelf_location, _gcce_cfilt_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
329 } |
|
330 } |
|
331 else if (_cl_use_rvct) |
|
332 { |
|
333 if (_cl_use_libs) |
|
334 { |
|
335 GetSymbolTableWithArmar(_rvct_armar_location, _rvct_cfilt_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
336 } |
|
337 else |
|
338 { |
|
339 GetSymbolTableWithFromelf(_rvct_fromelf_location, _rvct_cfilt_location, a_target.release_lib_dir, a_target.lib_files.at(i), symbol_table); |
|
340 } |
|
341 } |
|
342 |
|
343 // get statistics of the file |
|
344 struct stat stat_p; |
|
345 stat((a_target.release_lib_dir + a_target.lib_files.at(i)).c_str(), &stat_p); |
|
346 |
|
347 // create a new entry to list of all import libraries |
|
348 import_library_info lib_info; |
|
349 lib_info.directory = a_target.release_lib_dir; |
|
350 lib_info.filename = a_target.lib_files.at(i); |
|
351 lib_info.mod_time = stat_p.st_mtime; |
|
352 lib_info.symbol_table = symbol_table; |
|
353 |
|
354 a_target.import_libraries.push_back( lib_info ); |
|
355 } |
|
356 if (_cl_print_debug) |
|
357 cerr << "OK" << endl; |
|
358 else |
|
359 ShowProgressInfo(_current_progress_percentage, _current_progress, _max_progress, false); |
|
360 } |
|
361 } |
|
362 |
|
363 // ---------------------------------------------------------------------------------------------------------- |
|
364 |
|
// Brings a_target.binaries up to date with the executables/DLLs found in
// a_target.bin_files. A cached entry is refreshed (via petran) when its
// modification time no longer matches the file on disk, or when any import
// library it depends on appears in the global _changed_import_libraries.
// Files with no cache entry are parsed and appended as new entries.
// Sets a_target.cache_files_valid = false whenever anything changed.
void GetDataFromBinaries(target& a_target)
{
    // read data from binaries
    for (unsigned int i=0; i<a_target.bin_files.size(); i++)
    {
        bool is_new_file = true;

        if (_cl_print_debug)
            cerr << "Processing " << a_target.release_bin_dir << a_target.bin_files.at(i) << "...";

        // if not generating a clean cache, check if this file was already in the cache
        if (!_cl_generate_clean_cache)
        {
            // first try to find existing file
            bool update_file = false;
            int position = 0;   // index of the cache entry to refresh, valid only when update_file is true

            for (unsigned int j=0; j<a_target.binaries.size(); j++)
            {
                // check if names match (case-insensitive)
                if (StringICmp(a_target.bin_files.at(i).c_str(), a_target.binaries.at(j).filename.c_str()) == 0)
                {
                    is_new_file = false;

                    // compare modification times
                    struct stat stat_p;
                    stat((a_target.release_bin_dir + a_target.bin_files.at(i)).c_str(), &stat_p); // get new stats

                    if (!TimestampsMatches(a_target.binaries.at(j).mod_time, stat_p.st_mtime))
                    {
                        // time stamps are different so needs to update the file;
                        // no need to inspect the import libraries in that case
                        update_file = true;
                        position = j;
                        break;
                    }

                    // the entry also needs to be updated if any import library which this binary has dependency on has changed
                    for (unsigned int k=0; k<_changed_import_libraries.size(); k++)
                    {
                        for (unsigned int p=0; p<a_target.binaries.at(j).dependencies.size(); p++)
                        {
                            // check for file name match (extension ignored: .lib vs .dll etc.)
                            if (StringICmpFileNamesWithoutExtension(a_target.binaries.at(j).dependencies.at(p).filename, _changed_import_libraries.at(k).filename) == 0)
                            {
                                update_file = true;
                                position = j;
                                break;  // exits the dependency loop only
                            }
                        }

                        // no need to loop anymore if the file needs update
                        if (update_file)
                            break;
                    }

                    // there can't be anymore same names, so break the loop anyway
                    break;
                }
            }

            // get the new data
            if (update_file)
            {
                a_target.cache_files_valid = false; // cache files on disk must be rewritten

                // refresh the existing cache entry in place
                binary_info& b_info = a_target.binaries.at(position);
                b_info.directory = a_target.release_bin_dir;

                // re-read the import table of the binary with petran
                GetImportTableWithPetran(_petran_location, b_info);

                // get statistics of the file and set the modification time
                struct stat stat_p;
                stat((a_target.release_bin_dir + a_target.bin_files.at(i)).c_str(), &stat_p);
                b_info.mod_time = stat_p.st_mtime;
            }
        }

        // this is a new file, get info and append it to the vector
        if (is_new_file)
        {
            a_target.cache_files_valid = false; // cache files on disk must be rewritten

            binary_info b_info;
            b_info.directory = a_target.release_bin_dir;
            b_info.filename = a_target.bin_files.at(i);

            // read the import table of the binary with petran
            GetImportTableWithPetran(_petran_location, b_info);

            // get statistics of the file and set the modification time
            struct stat stat_p;
            stat((a_target.release_bin_dir + a_target.bin_files.at(i)).c_str(), &stat_p);
            b_info.mod_time = stat_p.st_mtime;

            // create a new entry to list of all binary files
            a_target.binaries.push_back( b_info );
        }

        if (_cl_print_debug)
            cerr << "OK" << endl;
        else
            ShowProgressInfo(_current_progress_percentage, _current_progress, _max_progress, false);
    }

}
|
469 |
|
470 // ---------------------------------------------------------------------------------------------------------- |
|
471 |
|
472 void WriteDataToSymbolTableCacheFile(const target& a_target) |
|
473 { |
|
474 // open the cache file for writing |
|
475 ofstream symtabcachef(a_target.st_cache_path.c_str(), ios::trunc); |
|
476 if (!symtabcachef.is_open()) |
|
477 { |
|
478 symtabcachef.close(); |
|
479 cerr << endl << "ERROR: Cannot open " << a_target.st_cache_path << " for writing!" << endl; |
|
480 cerr << "Please check that the directory exists and there are no write permission problems" << endl; |
|
481 exit(EXIT_CANNOT_WRITE_TO_CACHE_FILE); |
|
482 } |
|
483 |
|
484 // write data to the cache file |
|
485 symtabcachef << CACHE_ST_HEADER << endl; |
|
486 |
|
487 for (unsigned int i=0; i<a_target.import_libraries.size(); i++) |
|
488 { |
|
489 vector<string> symbol_table = a_target.import_libraries.at(i).symbol_table; |
|
490 |
|
491 symtabcachef << a_target.import_libraries.at(i).directory << CACHE_SEP << a_target.import_libraries.at(i).filename << CACHE_SEP |
|
492 << a_target.import_libraries.at(i).mod_time << CACHE_SEP << symbol_table.size() << endl; |
|
493 |
|
494 for (unsigned int j=0; j<symbol_table.size(); j++) |
|
495 { |
|
496 symtabcachef << symbol_table.at(j) << endl; |
|
497 } |
|
498 } |
|
499 |
|
500 // write footer, note that there is no eol char |
|
501 symtabcachef << CACHE_FOOTER; |
|
502 |
|
503 symtabcachef.close(); |
|
504 } |
|
505 |
|
506 // ---------------------------------------------------------------------------------------------------------- |
|
507 |
|
508 void WriteDataToDependenciesCacheFile(const target& a_target) |
|
509 { |
|
510 // open the cache file for writing |
|
511 ofstream depcachef(a_target.dep_cache_path.c_str(), ios::trunc); |
|
512 if (!depcachef.is_open()) |
|
513 { |
|
514 depcachef.close(); |
|
515 cerr << endl << "ERROR: Cannot open " << a_target.dep_cache_path << " for writing!" << endl; |
|
516 cerr << "Please check that the directory exists and there are no write permission problems" << endl; |
|
517 exit(EXIT_CANNOT_WRITE_TO_CACHE_FILE); |
|
518 } |
|
519 |
|
520 // write data to the cache file |
|
521 depcachef << CACHE_DEP_HEADER << endl; |
|
522 |
|
523 for (unsigned int i=0; i<a_target.binaries.size(); i++) |
|
524 { |
|
525 vector<dependency> deps = a_target.binaries.at(i).dependencies; |
|
526 |
|
527 depcachef << a_target.binaries.at(i).directory << CACHE_SEP << a_target.binaries.at(i).filename << CACHE_SEP |
|
528 << a_target.binaries.at(i).binary_format << CACHE_SEP << a_target.binaries.at(i).uid1 << CACHE_SEP |
|
529 << a_target.binaries.at(i).uid2 << CACHE_SEP << a_target.binaries.at(i).uid3 << CACHE_SEP |
|
530 << a_target.binaries.at(i).secureid << CACHE_SEP << a_target.binaries.at(i).vendorid << CACHE_SEP |
|
531 << a_target.binaries.at(i).capabilities << CACHE_SEP << a_target.binaries.at(i).min_heap_size << CACHE_SEP |
|
532 << a_target.binaries.at(i).max_heap_size << CACHE_SEP << a_target.binaries.at(i).stack_size << CACHE_SEP |
|
533 << a_target.binaries.at(i).mod_time << CACHE_SEP << deps.size() << endl; |
|
534 |
|
535 for (unsigned int j=0; j<deps.size(); j++) |
|
536 { |
|
537 vector<import> imps = deps.at(j).imports; |
|
538 |
|
539 depcachef << deps.at(j).filename << CACHE_SEP << imps.size() << endl; |
|
540 |
|
541 for (unsigned int k=0; k<imps.size(); k++) |
|
542 { |
|
543 depcachef << imps.at(k).funcpos << CACHE_SEP << imps.at(k).funcname << CACHE_SEP << imps.at(k).is_vtable |
|
544 << CACHE_SEP << imps.at(k).vtable_offset << endl; |
|
545 } |
|
546 } |
|
547 } |
|
548 |
|
549 // write footer, note that there is no eol char |
|
550 depcachef << CACHE_FOOTER; |
|
551 |
|
552 depcachef.close(); |
|
553 } |
|
554 |
|
555 // ---------------------------------------------------------------------------------------------------------- |
|
556 |
|
557 |