3
|
1 |
#
|
|
2 |
# Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
|
|
3 |
# All rights reserved.
|
|
4 |
# This component and the accompanying materials are made available
|
|
5 |
# under the terms of the License "Eclipse Public License v1.0"
|
|
6 |
# which accompanies this distribution, and is available
|
|
7 |
# at the URL "http://www.eclipse.org/legal/epl-v10.html".
|
|
8 |
#
|
|
9 |
# Initial Contributors:
|
|
10 |
# Nokia Corporation - initial contribution.
|
|
11 |
#
|
|
12 |
# Contributors:
|
|
13 |
#
|
|
14 |
# Description:
|
|
15 |
#
|
|
16 |
|
|
17 |
import raptor_utilities
|
|
18 |
import os
|
|
19 |
import re
|
|
20 |
import sys
|
|
21 |
import filter_interface
|
|
22 |
import xml.parsers.expat
|
|
23 |
import raptor
|
|
24 |
import generic_path
|
|
25 |
import tempfile
|
|
26 |
|
|
27 |
# This filter has not been tested on linux
|
|
28 |
if not raptor_utilities.getOSPlatform().startswith("linux"):
|
|
29 |
|
|
30 |
def reportcsdifference(path1, path2):
    """Compare two paths on stderr and highlight the differences.

    path1 is the actual case on disk, path2 the reference found in the
    metadata.  Differing characters are marked with "^", matching ones
    with "-", and characters of path1 beyond the end of path2 with "*".
    Output generated will be like this:
        Reference in metadata -> C:/foo/bar/cat.cpp
                                 -------^-------^--
        Actual case on disk   -> C:/foo/Bar/cat.Cpp
    Returns None; all output goes to sys.stderr.
    """
    same = "-"
    different = "^"
    space = ' '
    metadataString = 'Reference in metadata -> '
    ondiskString = 'Actual case on disk -> '

    sys.stderr.write(metadataString + path2 + "\n")

    separator = ""
    for i, ch in enumerate(path1):
        if i < len(path2):
            # Mark each position as matching or differing.
            separator += different if ch != path2[i] else same
        else:
            # path1 is longer than path2: mark the overhang once.
            # (Previously these characters were marked twice - once with
            # '*' here and again with '^' after the loop - which made the
            # separator longer than the path it underlines.)
            separator += '*'

    # Print the separator in alignment with the metadataString
    sys.stderr.write(space * len(metadataString) + separator + "\n")
    sys.stderr.write(ondiskString + path1 + "\n")
|
|
58 |
|
|
59 |
class FilterCheckSource(filter_interface.Filter):
    """Build-log filter that verifies the case of file references.

    Parses the XML build log emitted by raptor with expat, collects every
    file reference (``source`` attributes, <file> element contents, and
    dependency files named in compiler command lines) and checks that the
    case used in the metadata matches the actual case on disk.
    """

    def open(self, raptor_instance):
        """Initialise filter state, the expat parser and a temporary file
        used to accumulate dependency file names.

        Returns True on success, False if the temporary file could not be
        created."""
        self.raptor = raptor_instance
        self.ok = True
        self.errors = 0          # count of case-mismatch failures found
        self.checked = []        # paths already case-checked (avoid repeats)
        self.check = raptor_instance.doCheck
        self.casechecker = CheckCase()

        # Expat Parser initialisation
        self.p = xml.parsers.expat.ParserCreate()
        self.p.StartElementHandler = self.startelement  # Handles opening XML tags
        self.p.EndElementHandler = self.endelement      # Handles closing XML tags
        self.p.CharacterDataHandler = self.chardata     # Handles data between opening/closing tags

        # Regex initialisation
        self.rvctdependfinder = re.compile("--depend\s+(.*?d)(?:\s+|$)", re.IGNORECASE|re.DOTALL)
        self.cwdependfinder = re.compile("#'\s+(.*?\.dep)", re.IGNORECASE|re.DOTALL)

        # Data to be passed to case checkers
        self.currentmmp = ""
        self.currentbldinf = ""
        self.currentconfig = ""

        self.filestocheck = []

        # Need this flag for the chardata method that does not have the name of the
        # current XML element passed to it as a parameter.
        self.infiletag = False

        # Create a temporary file to record all dependency files. We can only parse those after
        # make has finished running all the compile commands and by definition these
        # files should therefore exist.
        try:
            self.tmp = tempfile.TemporaryFile()
        except Exception:
            # Was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed.  Message previously named "FilterClean".
            sys.stderr.write("sbs: could not create temporary file for FilterCheckSource\n")
            self.ok = False

        return self.ok

    def write(self, text):
        """Feed one chunk of log text to the XML parser.

        Returns self.ok so the caller can detect earlier failures."""
        # Slightly nasty that we have to "ignore" exceptions, but the xml parser
        # generates this when it encounters non-xml lines (like make: nothing to be done for 'export')
        try:
            self.p.Parse(text.rstrip())
        except xml.parsers.expat.ExpatError:
            pass

        return self.ok

    def saveitem(self, path):
        "put path into a temporary file."
        try:
            self.tmp.write(path + "\n")
        except Exception:
            # Was a bare "except:"; narrowed to Exception.
            sys.stderr.write("sbs: could not write temporary file in FilterCheckSource\n")
            self.ok = False

    def startelement(self, name, attrs):
        """Expat start-tag handler: record source files and clean-tag context."""
        # Check the source code cpp files - obtained from the "source"
        # attribute of compile and other tags
        if 'source' in attrs:
            if attrs['source'] != "":
                self.filestocheck.append(attrs['source'])

        # Record the current metadata files and config
        if name == "clean":
            self.currentmmp = attrs["mmp"]
            self.currentbldinf = attrs["bldinf"]
            self.currentconfig = attrs["config"]

        # Indicates we are in a <file> element
        if name == "file":
            # Need to use a flag to indicate that we are processing a file tag
            self.infiletag = True

    def chardata(self, data):
        """Expat character-data handler: collect file names and dependency
        files mentioned in <file> elements or compiler command lines."""
        # Strip quotes from data
        unquoteddata = data.strip("\"\'")

        # Use a flag to determine that we are processing a file tag since this method
        # doesn't receive the "name" argument that startelement/endelement
        if self.infiletag:
            self.filestocheck.append(unquoteddata)

            # Also write dependency file names to temp file to parse the
            # contents of these at the end
            if unquoteddata.endswith(".d") or unquoteddata.endswith(".dep"):
                self.saveitem(unquoteddata)

        # RVCT depends files
        # Outside of file tags, chardata will be called on CDATA which contains
        # compiler calls, hence we parse these for the "--depend" option to extract
        # the .d file.
        if "--depend" in data:
            result = self.rvctdependfinder.findall(data)
            for res in result:
                self.saveitem(res)

        # CW toolchain depends files
        # As for RVCT, chardata will be called on CDATA which contains compiler calls,
        # hence we parse these for file names ending in .dep after the sequence #, ' and
        # a space. The win32.flm munges the contents of these files around so we are really
        # interested in the .o.d files - these have the same path as the .dep files but
        # with the extension changed to .o.d from .dep.
        if ".dep" in data:
            result = self.cwdependfinder.findall(data)
            for res in result:
                self.saveitem(res.replace(".dep", ".o.d"))

    def endelement(self, name):
        """Expat end-tag handler: reset per-tag state and check any files
        collected while the element was open."""
        # Blank out the mmp, bldinf and config for next clean tag (in case it has any blanks)
        if name == "clean":
            self.currentmmp = ""
            self.currentbldinf = ""
            self.currentconfig = ""

        if name == "file":
            self.infiletag = False

        if self.filestocheck:
            # Check the found file(s)
            for filename in self.filestocheck:
                self.checksource(filename)

            # Reset list so as not to re-check already checked files
            self.filestocheck = []

    def close(self):
        """Return the overall success state of the filter."""
        return self.ok

    def summary(self):
        """Parse all recorded dependency files, case-check every prerequisite
        they list, and print a final error count to stdout."""
        depparser = DependenciesParser()
        dependenciesfileset = set()  # Stores the files listed inside dependency files
        deps = []                    # Stores dependency (.d and .dep) files

        try:
            self.tmp.flush()  # write what is left in the buffer
            self.tmp.seek(0)  # rewind to the beginning

            for line in self.tmp.readlines():
                path = line.strip()

                # Skip files we have already parsed; previously these fell
                # into the "does not exist" branch and produced a spurious
                # message for every duplicate entry.
                if path in dependenciesfileset:
                    continue

                # Only try to parse the file if it exists as a file
                if os.path.isfile(path):
                    dependenciesfileset.add(path)

                    # Here we parse each dependency file and form a list of the prerequisites contained therein
                    dependencyfilelines = depparser.readdepfilelines(path)                     # Read the lines
                    dependencyfilestr = depparser.removelinecontinuation(dependencyfilelines)  # Join them up
                    dependencyfiles = depparser.getdependencies(dependencyfilestr)             # Get prerequisites
                    deps.extend(dependencyfiles)                                               # Add to list
                else:
                    sys.stdout.write("\t" + path + " does not exist\n")

            self.tmp.close()  # This also deletes the temporary file

            # Make a set of the prerequisites listed in the dependency files
            # so we only check each one once
            depset = set(deps)
            deplistnodups = list(depset)

            # Do the check for each file
            for dep in deplistnodups:
                dep = os.path.normpath(dep).replace('\\', '/')
                self.checksource(dep)

        except Exception:
            # Was py2-only "except Exception, e"; message previously named
            # "FilterClean".
            sys.stderr.write("sbs: could not access temporary file for FilterCheckSource\n")

        if self.errors == 0:
            sys.stdout.write("No checksource errors found\n")
        else:
            sys.stdout.write("\n %d checksource errors found in the build\n" % self.errors)

    def checksource(self, path):
        """Case-check a single path; on a mismatch report the difference to
        stderr and bump the error count.  Non-existent paths are ignored."""
        normedpath = path.replace("\"", "")  # Remove quoting

        if normedpath not in self.checked:
            self.checked.append(normedpath)
            try:
                realpath = self.casechecker.checkcase(normedpath)
            except IOError:
                # file does not exist so just return
                return

            if realpath != normedpath and realpath != "":
                self.ok = False
                self.errors += 1
                sys.stderr.write("\nChecksource Failure:\n")
                reportcsdifference(realpath, normedpath)
|
|
255 |
|
|
256 |
class CheckCase(object):
    """Used to check the case of a given path matches the file system.
    Caches previous lookups to reduce disk IO and improve performance"""

    def __init__(self):
        # Nested dict mirroring the directory structure in the same case as
        # the file system: directories map to sub-dicts, plain files to 1.
        self.__dirsCache = {}

    def checkcase(self, path):
        """Checks the path matches the file system.

        Returns the path rebuilt from the actual directory entries on disk,
        so the caller can compare it against the metadata path.
        Raises IOError if the path does not exist."""
        path = os.path.normpath(path)
        path = path.replace('\\', '/')

        if not os.path.exists(path):
            # Was py2-only "raise IOError, ..." syntax.
            raise IOError(path + " does not exist")

        parts = path.split('/')

        # Root component (drive letter or "" for an absolute POSIX path).
        dirBeingChecked = parts.pop(0) + "/"

        cacheItem = self.__dirsCache

        for part in parts:
            if not self.checkkeyignorecase(cacheItem, part):
                # Cache miss (or case mismatch): scan the directory on disk.
                dirItems = os.listdir(dirBeingChecked)

                found = False

                for dirItem in dirItems:
                    if os.path.isdir(os.path.join(dirBeingChecked, dirItem)):
                        if dirItem not in cacheItem:  # was dict.has_key (py2-only)
                            cacheItem[dirItem] = {}

                        if not found:
                            # Check if there is a dir match (case-insensitive).
                            # re.escape stops path characters such as "+" or
                            # "(" acting as regex metacharacters.
                            if re.search("^" + re.escape(part) + "$", dirItem, re.IGNORECASE):
                                found = True

                                cacheItem = cacheItem[dirItem]

                                dirBeingChecked = os.path.join(dirBeingChecked, dirItem).replace('\\', '/')
                    else:
                        cacheItem[dirItem] = 1

                        if not found:
                            # Check if there is a file match
                            if re.search("^" + re.escape(part) + "$", dirItem, re.IGNORECASE):
                                found = True

                                return os.path.join(dirBeingChecked, dirItem).replace('\\', '/')

            else:
                # Exact-case cache hit: descend without touching the disk.
                if os.path.isdir(os.path.join(dirBeingChecked, part)):
                    cacheItem = cacheItem[part]

                dirBeingChecked = os.path.join(dirBeingChecked, part).replace('\\', '/')

        return dirBeingChecked

    def checkkeyignorecase(self, dictionary, keyToFind):
        """Return True only if keyToFind is present in dictionary with
        exactly matching case; False if it is present in a different case,
        or not present at all."""
        for key in dictionary.keys():
            # re.escape added so literal path characters cannot be
            # misinterpreted as regex syntax.
            if re.search("^" + re.escape(keyToFind) + "$", key, re.IGNORECASE):
                if keyToFind != key:
                    return False

                return True

        return False
|
|
326 |
|
|
327 |
class DependenciesParser(object):
    """Parses GNU Make dependency (.d / .dep) files and extracts the
    prerequisite file names listed to the right of each rule's ':'."""
    # Note: the no-op __init__ was removed; the default one is equivalent.

    def readdepfilelines(self, dotdfile):
        """ Read the lines from a Make dependency file and return them as a list """
        lines = []
        try:
            fh = open(dotdfile, "r")
        except IOError as e:  # was py2-only "except IOError, e"
            print("Error: Failed to open file \"%s\": %s" % (dotdfile, e.strerror))
        except Exception as e:
            print("Error: Unknown error: %s" % str(e))
        else:
            lines = fh.readlines()
            fh.close()

        return lines

    def removelinecontinuation(self, lineslist):
        """ Remove line continuation chararacters '\\' from the end of any lines in
        the list that have them and return a string with lines joined together """
        # Renamed local from "str", which shadowed the builtin.
        joined = " ".join(lineslist).replace('\\\n', '')
        return joined

    def getdependencies(self, dotdfilestring):
        """ Splits the multi-lined string dotdfilestring and performs a regexp
        match on files to the right of a : on each line """

        # Strip whitespace at the start of the string
        lines = dotdfilestring.lstrip().split("\n")

        dependencyset = set()  # Create a set to skip duplicates
        for line in lines:
            # Split on whitespace that is *not* preceeded by a \ - i.e.
            # don't split on escaped spaces.
            lineparts = re.split(r"(?<!\\)\s+", line)

            # Drop element 0 as this will be the target of each rule
            lineparts = lineparts[1:]

            for linepart in lineparts:
                # Some of the line parts are empty, so skip those
                if linepart != "":
                    dependencyset.add(linepart)

        # Create list to return from the initial set
        files = list(dependencyset)
        return files
|