179
|
1 |
# -*- coding: utf-8 -*-
|
|
2 |
"""
|
|
3 |
sphinx.util
|
|
4 |
~~~~~~~~~~~
|
|
5 |
|
|
6 |
Utility functions for Sphinx.
|
|
7 |
|
|
8 |
:copyright: 2007-2008 by Georg Brandl.
|
|
9 |
:license: BSD.
|
|
10 |
"""
|
|
11 |
|
|
12 |
import os
import re
import sys
import time
import errno
import fnmatch
import tempfile
import traceback
from os import path
|
|
20 |
|
|
21 |
|
|
22 |
# Generally useful regular expressions.
ws_re = re.compile(r'\s+')                           # any run of whitespace
caption_ref_re = re.compile(r'^([^<]+?)\s*<(.+)>$')  # "caption <target>" references
|
|
25 |
|
|
26 |
|
|
27 |
# SEP separates path elements in the canonical file names
|
|
28 |
#
|
|
29 |
# Define SEP as a manifest constant, not so much because we expect it to change
|
|
30 |
# in the future as to avoid the suspicion that a stray "/" in the code is a
|
|
31 |
# hangover from more *nix-oriented origins.
|
|
32 |
SEP = "/"
|
|
33 |
|
|
34 |
def os_path(canonicalpath):
    """Convert a canonical (SEP-separated) path into an OS-specific one."""
    return os.path.sep.join(canonicalpath.split(SEP))
|
|
36 |
|
|
37 |
|
|
38 |
def relative_uri(base, to):
    """Return a relative URL from ``base`` to ``to``."""
    base_parts = base.split(SEP)
    to_parts = to.split(SEP)
    # count the leading path segments the two URIs have in common
    common = 0
    for seg_base, seg_to in zip(base_parts, to_parts):
        if seg_base != seg_to:
            break
        common += 1
    base_parts = base_parts[common:]
    to_parts = to_parts[common:]
    # climb one level for every directory left in base; its last element
    # is the file name itself, hence the -1
    return ('..' + SEP) * (len(base_parts) - 1) + SEP.join(to_parts)
|
|
49 |
|
|
50 |
|
|
51 |
def ensuredir(path):
    """Ensure that a directory path exists (like ``mkdir -p``).

    Intermediate directories are created as needed; a path that already
    exists is not an error.  Other failures (permissions etc.) are re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        # EEXIST means the directory is already there, which is fine;
        # the symbolic constant replaces the former magic number 17
        if err.errno != errno.EEXIST:
            raise
|
|
58 |
|
|
59 |
|
|
60 |
def walk(top, topdown=True, followlinks=False):
    """
    Backport of os.walk from 2.6, where the followlinks argument was added.
    """
    entries = os.listdir(top)

    # partition directory entries, keeping listdir order within each group
    subdirs, regular = [], []
    for entry in entries:
        if path.isdir(path.join(top, entry)):
            subdirs.append(entry)
        else:
            regular.append(entry)

    if topdown:
        # yield before recursing so callers may prune ``subdirs`` in place
        yield top, subdirs, regular
    for entry in subdirs:
        child = path.join(top, entry)
        # symlinked directories are only followed on request
        if not followlinks and path.islink(child):
            continue
        for item in walk(child, topdown, followlinks):
            yield item
    if not topdown:
        yield top, subdirs, regular
|
|
82 |
|
|
83 |
|
|
84 |
def get_matching_docs(dirname, suffix, exclude_docs=(), exclude_dirs=(),
                      exclude_trees=(), exclude_dirnames=()):
    """
    Yield the canonical names (without suffix) of all documents matching
    *suffix* somewhere below *dirname*, recursively.

    Docs in *exclude_docs* and dirs in *exclude_dirs* are skipped, whole
    subtrees in *exclude_trees* are pruned, and directory base names in
    *exclude_dirnames* are pruned wherever they occur.
    """
    pattern = '*' + suffix
    # normalize so slicing off the prefix yields canonical relative names
    dirname = path.normpath(path.abspath(dirname))
    prefix_len = len(dirname) + 1  # the +1 skips the path separator
    for root, dirs, files in walk(dirname, followlinks=True):
        relroot = root[prefix_len:]
        if relroot in exclude_dirs:
            continue
        if relroot in exclude_trees:
            dirs[:] = []  # prune the whole subtree from the walk
            continue
        dirs.sort()
        files.sort()
        for prunename in exclude_dirnames:
            if prunename in dirs:
                dirs.remove(prunename)
        for filename in files:
            if not fnmatch.fnmatch(filename, pattern):
                continue
            docname = path.join(relroot, filename[:-len(suffix)])
            docname = docname.replace(os.path.sep, SEP)
            if docname in exclude_docs:
                continue
            yield docname
|
|
116 |
|
|
117 |
|
|
118 |
def mtimes_of_files(dirnames, suffix):
    """Yield the mtimes of all files ending in *suffix* below *dirnames*."""
    for dirname in dirnames:
        for root, _dirs, files in os.walk(dirname):
            for filename in files:
                if not filename.endswith(suffix):
                    continue
                try:
                    yield path.getmtime(path.join(root, filename))
                except EnvironmentError:
                    # unreadable or vanished files are silently skipped
                    pass
|
|
127 |
|
|
128 |
|
|
129 |
def shorten_result(text='', keywords=(), maxlen=240, fuzz=60):
    """Return an excerpt of *text* of at most roughly *maxlen* characters,
    starting near the first occurrence of any of the *keywords*.

    The excerpt start is widened to a nearby punctuation boundary within
    *fuzz* characters, and '... ' / ' ...' mark where text was cut off.
    The default for *keywords* is now an immutable tuple (was a mutable
    list default).
    """
    if not text:
        # also normalizes a None argument to the empty string
        text = ''
    text_low = text.lower()
    beg = -1
    # find the earliest occurrence of any keyword (case-insensitive)
    for k in keywords:
        i = text_low.find(k.lower())
        if (i > -1 and i < beg) or beg == -1:
            beg = i
    if beg == -1:
        # no keyword found: excerpt from the very beginning.  Previously the
        # -1 leaked into the slice below, yielding maxlen-1 characters and a
        # spurious trailing ' ...' when len(text) == maxlen.
        beg = 0
    excerpt_beg = 0
    if beg > fuzz:
        # try to start the excerpt at a punctuation boundary near the match
        for sep in ('.', ':', ';', '='):
            eb = text.find(sep, beg - fuzz, beg - 1)
            if eb > -1:
                eb += 1
                break
        else:
            eb = beg - fuzz
        excerpt_beg = eb
    if excerpt_beg < 0:
        excerpt_beg = 0
    msg = text[excerpt_beg:beg + maxlen]
    if beg > fuzz:
        msg = '... ' + msg
    if beg < len(text) - maxlen:
        msg = msg + ' ...'
    return msg
|
|
156 |
|
|
157 |
|
|
158 |
class attrdict(dict):
    """A dict whose items can also be accessed as attributes."""

    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
|
|
165 |
|
|
166 |
|
|
167 |
def fmt_ex(ex):
    """Format a single line with an exception description."""
    lines = traceback.format_exception_only(ex.__class__, ex)
    # the last line carries the "Type: message" summary
    return lines[-1].strip()
|
|
170 |
|
|
171 |
|
|
172 |
def rpartition(s, t):
    """Similar to str.rpartition from 2.5, but doesn't return the separator."""
    pos = s.rfind(t)
    if pos == -1:
        # separator absent: everything goes into the tail
        return '', s
    return s[:pos], s[pos + len(t):]
|
|
178 |
|
|
179 |
|
|
180 |
def format_exception_cut_frames(x=1):
    """
    Format an exception with traceback, but only the last x frames.
    """
    exc_type, exc_value, tb = sys.exc_info()
    # keep only the innermost x frames, then append the summary line(s)
    frames = traceback.format_tb(tb)[-x:]
    frames += traceback.format_exception_only(exc_type, exc_value)
    return ''.join(frames)
|
|
191 |
|
|
192 |
|
|
193 |
def save_traceback():
    """Save the current exception's traceback in a temporary file.

    Returns the name of the temporary file (``sphinx-err-*.log``).
    """
    exc = traceback.format_exc()
    # local renamed from ``path``, which shadowed the os.path module import
    fd, tbpath = tempfile.mkstemp('.log', 'sphinx-err-')
    # os.write needs a byte string; on Python 2 format_exc already returns
    # bytes, so this leaves the old behavior untouched
    data = exc if isinstance(exc, bytes) else exc.encode('utf-8')
    os.write(fd, data)
    os.close(fd)
    return tbpath
|
|
202 |
|
|
203 |
|
|
204 |
def _translate_pattern(pat):
    """
    Translate a shell-style glob pattern to a regular expression.

    Adapted from the fnmatch module, but enhanced so that single stars don't
    match slashes.  Returns the regex source string, anchored with a
    trailing ``$`` (callers use ``re.match``, so the start is implicit).
    """
    i, n = 0, len(pat)
    res = ''
    # consume the pattern one character at a time
    while i < n:
        c = pat[i]
        i += 1
        if c == '*':
            if i < n and pat[i] == '*':
                # double star matches slashes too
                i += 1
                res = res + '.*'
            else:
                # single star doesn't match slashes
                res = res + '[^/]*'
        elif c == '?':
            # question mark doesn't match slashes too
            res = res + '[^/]'
        elif c == '[':
            # scan ahead for the closing bracket; a leading '!' (negation)
            # or a ']' right after it is part of the set, not the closer
            j = i
            if j < n and pat[j] == '!':
                j += 1
            if j < n and pat[j] == ']':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated set: treat the '[' as a literal character
                res = res + '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # negative pattern mustn't match slashes too
                    stuff = '^/' + stuff[1:]
                elif stuff[0] == '^':
                    # a literal leading '^' must be escaped inside the class
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            # everything else is matched literally
            res += re.escape(c)
    return res + '$'
|
|
249 |
|
|
250 |
|
|
251 |
_pat_cache = {}  # glob pattern string -> compiled regex, filled lazily by patfilter()
|
|
252 |
|
|
253 |
def patfilter(names, pat):
    """
    Return the subset of the list NAMES that match PAT.
    Adapted from fnmatch module.

    (The unused ``result`` local of the original was removed.)
    """
    if pat not in _pat_cache:
        # compile lazily and memoize: the same patterns are applied to
        # many directory listings
        _pat_cache[pat] = re.compile(_translate_pattern(pat))
    match = _pat_cache[pat].match
    return filter(match, names)
|
|
263 |
|
|
264 |
|
|
265 |
no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')  # characters that are unsafe in file names
|
|
266 |
|
|
267 |
def make_filename(string):
    """Return *string* with all file-name-unsafe characters removed."""
    # splitting on the unsafe characters and rejoining drops them all
    return ''.join(no_fn_re.split(string))
|
|
269 |
|
|
270 |
|
|
271 |
def nested_parse_with_titles(state, content, node):
    """Parse *content* into *node* with state.nested_parse(), allowing titles.

    Temporarily resets the title style bookkeeping in ``state.memo`` so the
    nested content may use its own section decorations.
    """
    # hack around title style bookkeeping
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    try:
        state.nested_parse(content, 0, node, match_titles=1)
    finally:
        # restore the memo even when parsing raised, so one bad fragment
        # cannot corrupt title bookkeeping for the rest of the document
        state.memo.title_styles = surrounding_title_styles
        state.memo.section_level = surrounding_section_level
|
|
280 |
|
|
281 |
|
|
282 |
def ustrftime(format, *args):
    # strftime for unicode strings
    # Python 2's time.strftime() cannot handle unicode format strings
    # directly, so the format is encoded to a UTF-8 byte string first and
    # the byte-string result is decoded back to unicode.
    # NOTE(review): relies on the Python 2 ``unicode`` builtin; this
    # function is not Python 3 compatible as written.
    return time.strftime(unicode(format).encode('utf-8'), *args).decode('utf-8')
|