179
|
1 |
# -*- coding: utf-8 -*-
|
|
2 |
"""
|
|
3 |
sphinx.environment
|
|
4 |
~~~~~~~~~~~~~~~~~~
|
|
5 |
|
|
6 |
Global creation environment.
|
|
7 |
|
|
8 |
:copyright: 2007-2008 by Georg Brandl.
|
|
9 |
:license: BSD.
|
|
10 |
"""
|
|
11 |
|
|
12 |
import re
|
|
13 |
import os
|
|
14 |
import time
|
|
15 |
import heapq
|
|
16 |
import types
|
|
17 |
import imghdr
|
|
18 |
import difflib
|
|
19 |
import cPickle as pickle
|
|
20 |
from os import path
|
|
21 |
from glob import glob
|
|
22 |
from string import uppercase
|
|
23 |
from itertools import izip, groupby
|
|
24 |
try:
|
|
25 |
import hashlib
|
|
26 |
md5 = hashlib.md5
|
|
27 |
except ImportError:
|
|
28 |
# 2.4 compatibility
|
|
29 |
import md5
|
|
30 |
md5 = md5.new
|
|
31 |
|
|
32 |
from docutils import nodes
|
|
33 |
from docutils.io import FileInput, NullOutput
|
|
34 |
from docutils.core import Publisher
|
|
35 |
from docutils.utils import Reporter, relative_path
|
|
36 |
from docutils.readers import standalone
|
|
37 |
from docutils.parsers.rst import roles
|
|
38 |
from docutils.parsers.rst.languages import en as english
|
|
39 |
from docutils.parsers.rst.directives.html import MetaBody
|
|
40 |
from docutils.writers import UnfilteredWriter
|
|
41 |
from docutils.transforms import Transform
|
|
42 |
from docutils.transforms.parts import ContentsFilter
|
|
43 |
|
|
44 |
from sphinx import addnodes
|
|
45 |
from sphinx.util import get_matching_docs, SEP, ustrftime
|
|
46 |
from sphinx.directives import additional_xref_types
|
|
47 |
|
|
48 |
# Default docutils settings applied when reading/parsing source files;
# copied into each BuildEnvironment's .settings in __init__.
default_settings = {
    'embed_stylesheet': False,
    'cloak_email_addresses': True,
    'pep_base_url': 'http://www.python.org/dev/peps/',
    'rfc_base_url': 'http://rfc.net/',
    'input_encoding': 'utf-8',
    'doctitle_xform': False,
    'sectsubtitle_xform': False,
}

# This is increased every time an environment attribute is added
# or changed to properly invalidate pickle files.
ENV_VERSION = 26


# Substitutions supplied from config values when the document does not
# define them itself (consumed by DefaultSubstitutions below).
default_substitutions = set([
    'version',
    'release',
    'today',
])

# Placeholder reporter for places that require a docutils Reporter object
# but whose messages are not wanted (e.g. the default-role lookup).
dummy_reporter = Reporter('', 4, 4)
|
|
70 |
|
|
71 |
|
|
72 |
class RedirStream(object):
    """File-like object that forwards non-blank writes to a callback.

    Used to funnel docutils' warning stream into the environment's
    warning function.
    """

    def __init__(self, writefunc):
        self.writefunc = writefunc

    def write(self, text):
        # swallow whitespace-only chunks, forward everything else
        if text.strip():
            self.writefunc(text)
|
|
78 |
|
|
79 |
|
|
80 |
class NoUri(Exception):
|
|
81 |
"""Raised by get_relative_uri if there is no URI available."""
|
|
82 |
pass
|
|
83 |
|
|
84 |
|
|
85 |
class DefaultSubstitutions(Transform):
    """
    Fill in |version|, |release| and |today| from config values when the
    document does not define those substitutions itself.
    """
    # run before the default Substitutions
    default_priority = 210

    def apply(self):
        config = self.document.settings.env.config
        # only substitute names the document leaves undefined
        undefined = default_substitutions - set(self.document.substitution_defs)
        for node in self.document.traverse(nodes.substitution_reference):
            name = node['refname']
            if name not in undefined:
                continue
            text = config[name]
            if name == 'today' and not text:
                # an empty today value means: render the current date,
                # optionally with a user-supplied strftime format
                text = ustrftime(config.today_fmt or _('%B %d, %Y'))
            node.replace_self(nodes.Text(text, text))
|
|
104 |
|
|
105 |
|
|
106 |
class MoveModuleTargets(Transform):
    """
    Move module targets to their nearest enclosing section title.
    """
    default_priority = 210

    def apply(self):
        for target in self.document.traverse(nodes.target):
            ids = target['ids']
            if not ids:
                continue
            # only handle "module-*" targets sitting directly in a section:
            # hoist their ids onto the section and drop the target node
            if ids[0].startswith('module-') and \
               target.parent.__class__ is nodes.section:
                target.parent['ids'] = ids
                target.parent.remove(target)
|
|
120 |
|
|
121 |
|
|
122 |
class HandleCodeBlocks(Transform):
    """
    Move doctest blocks out of blockquotes.
    """
    default_priority = 210

    def apply(self):
        for quote in self.document.traverse(nodes.block_quote):
            children = quote.children
            # a blockquote whose sole child is a doctest block is just an
            # indented doctest -- promote the doctest in its place
            if len(children) == 1 and isinstance(children[0],
                                                 nodes.doctest_block):
                quote.replace_self(children[0])
|
|
133 |
|
|
134 |
class CitationReferences(Transform):
    """
    Handle citation references before the default docutils transform does.
    """
    default_priority = 619

    def apply(self):
        for node in self.document.traverse(nodes.citation_reference):
            label = node.astext()
            # turn the citation reference into a pending cross-reference
            # so it can be resolved against the global citation inventory
            xref = addnodes.pending_xref(label, reftype='citation',
                                         reftarget=label)
            xref += nodes.Text('[' + label + ']')
            node.parent.replace(node, xref)
|
|
147 |
|
|
148 |
|
|
149 |
class SphinxStandaloneReader(standalone.Reader):
    """
    Add our own transforms.
    """
    # Sphinx-specific transforms, applied in addition to the docutils
    # standard set
    transforms = [CitationReferences, DefaultSubstitutions, MoveModuleTargets,
                  HandleCodeBlocks]

    def get_transforms(self):
        # extend, don't replace, the standard transform list
        return standalone.Reader.get_transforms(self) + self.transforms
|
|
158 |
|
|
159 |
|
|
160 |
class SphinxDummyWriter(UnfilteredWriter):
    """Writer that produces no output; used by read_doc(), which only
    needs the parsed doctree, not any rendered result."""
    supported = ('html',)  # needed to keep "meta" nodes

    def translate(self):
        # intentionally a no-op: nothing is written
        pass
|
|
165 |
|
|
166 |
|
|
167 |
|
|
168 |
class SphinxContentsFilter(ContentsFilter):
    """
    Used with BuildEnvironment.add_toc_from() to discard cross-file links
    within table-of-contents link nodes.
    """
    def visit_pending_xref(self, node):
        # replace the unresolved cross-reference by its plain text rendered
        # as a literal, and don't descend into the node's children
        content = node.astext()
        self.parent.append(nodes.literal(content, content))
        raise nodes.SkipNode
|
|
177 |
|
|
178 |
|
|
179 |
class BuildEnvironment:
|
|
180 |
"""
|
|
181 |
The environment in which the ReST files are translated.
|
|
182 |
Stores an inventory of cross-file targets and provides doctree
|
|
183 |
transformations to resolve links to them.
|
|
184 |
"""
|
|
185 |
|
|
186 |
# --------- ENVIRONMENT PERSISTENCE ----------------------------------------
|
|
187 |
|
|
188 |
@staticmethod
|
|
189 |
def frompickle(config, filename):
|
|
190 |
picklefile = open(filename, 'rb')
|
|
191 |
try:
|
|
192 |
env = pickle.load(picklefile)
|
|
193 |
finally:
|
|
194 |
picklefile.close()
|
|
195 |
env.config.values = config.values
|
|
196 |
if env.version != ENV_VERSION:
|
|
197 |
raise IOError('env version not current')
|
|
198 |
return env
|
|
199 |
|
|
200 |
    def topickle(self, filename):
        """Pickle this environment to *filename*.

        The warning callback and the config's ``values`` are removed before
        dumping and restored afterwards; config attributes holding modules,
        functions or classes are dropped permanently, since they could not
        be pickled.
        """
        # remove unpicklable attributes
        warnfunc = self._warnfunc
        self.set_warnfunc(None)
        values = self.config.values
        del self.config.values
        picklefile = open(filename, 'wb')
        # remove potentially pickling-problematic values from config
        for key, val in vars(self.config).items():
            if key.startswith('_') or \
               isinstance(val, types.ModuleType) or \
               isinstance(val, types.FunctionType) or \
               isinstance(val, (type, types.ClassType)):
                del self.config[key]
        try:
            pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
        finally:
            picklefile.close()
        # reset attributes
        self.config.values = values
        self.set_warnfunc(warnfunc)
|
|
221 |
|
|
222 |
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
|
|
223 |
|
|
224 |
    def __init__(self, srcdir, doctreedir, config):
        """Create a fresh, empty build environment.

        srcdir     -- root directory of the documentation sources
        doctreedir -- directory where pickled doctrees are stored
        config     -- the Sphinx config object
        """
        self.doctreedir = doctreedir
        self.srcdir = srcdir
        self.config = config

        # the application object; only set while update() runs
        self.app = None

        # the docutils settings for building
        self.settings = default_settings.copy()
        self.settings['env'] = self

        # the function to write warning messages with
        self._warnfunc = None

        # this is to invalidate old pickles
        self.version = ENV_VERSION

        # All "docnames" here are /-separated and relative and exclude the source suffix.

        self.found_docs = set()     # contains all existing docnames
        self.all_docs = {}          # docname -> mtime at the time of build
                                    # contains all built docnames
        self.dependencies = {}      # docname -> set of dependent file names,
                                    # relative to documentation root

        # File metadata
        self.metadata = {}          # docname -> dict of metadata items

        # TOC inventory
        self.titles = {}            # docname -> title node
        self.tocs = {}              # docname -> table of contents nodetree
        self.toc_num_entries = {}   # docname -> number of real entries
                                    # used to determine when to show the TOC in a
                                    # sidebar (don't show if it's only one item)
        self.toctree_includes = {}  # docname -> list of toctree includefiles
        self.files_to_rebuild = {}  # docname -> set of files (containing its
                                    # TOCs) to rebuild too
        self.glob_toctrees = set()  # docnames that have :glob: toctrees

        # X-ref target inventory
        self.descrefs = {}          # fullname -> docname, desctype
        self.filemodules = {}       # docname -> [modules]
        self.modules = {}           # modname -> docname, synopsis, platform, deprecated
        self.labels = {}            # labelname -> docname, labelid, sectionname
        self.anonlabels = {}        # labelname -> docname, labelid
        self.progoptions = {}       # (program, name) -> docname, labelid
        self.reftargets = {}        # (type, name) -> docname, labelid
                                    # where type is term, token, envvar, citation

        # Other inventories
        self.indexentries = {}      # docname -> list of
                                    # (type, string, target, aliasname)
        self.versionchanges = {}    # version -> list of
                                    # (type, docname, lineno, module, descname, content)
        self.images = {}            # absolute path -> (docnames, unique filename)

        # These are set while parsing a file
        self.docname = None         # current document name
        self.currmodule = None      # current module name
        self.currclass = None       # current class name
        self.currdesc = None        # current descref name
        self.currprogram = None     # current program name
        self.index_num = 0          # autonumber for index targets
        self.gloss_entries = set()  # existing definition labels

        # Some magically present labels
        self.labels['genindex'] = ('genindex', '', _('Index'))
        self.labels['modindex'] = ('modindex', '', _('Module Index'))
        self.labels['search'] = ('search', '', _('Search Page'))
|
|
294 |
|
|
295 |
    def set_warnfunc(self, func):
        """Set the function used to emit warnings; docutils' warning output
        is redirected through it as well (via the 'warning_stream' setting)."""
        self._warnfunc = func
        self.settings['warning_stream'] = RedirStream(func)
|
|
298 |
|
|
299 |
def warn(self, docname, msg, lineno=None):
|
|
300 |
if docname:
|
|
301 |
if lineno is None:
|
|
302 |
lineno = ''
|
|
303 |
self._warnfunc('%s:%s: %s' % (self.doc2path(docname), lineno, msg))
|
|
304 |
else:
|
|
305 |
self._warnfunc('GLOBAL:: ' + msg)
|
|
306 |
|
|
307 |
    def clear_doc(self, docname):
        """Remove all traces of a source file in the inventory."""
        if docname in self.all_docs:
            self.all_docs.pop(docname, None)
            self.metadata.pop(docname, None)
            self.dependencies.pop(docname, None)
            self.titles.pop(docname, None)
            self.tocs.pop(docname, None)
            self.toc_num_entries.pop(docname, None)
            self.toctree_includes.pop(docname, None)
            self.filemodules.pop(docname, None)
            self.indexentries.pop(docname, None)
            self.glob_toctrees.discard(docname)

            # NOTE: deleting entries while looping is safe below because
            # .items() returns a list in Python 2 (a snapshot).
            for subfn, fnset in self.files_to_rebuild.items():
                fnset.discard(docname)
                # drop rebuild sets that became empty
                if not fnset:
                    del self.files_to_rebuild[subfn]
            for fullname, (fn, _) in self.descrefs.items():
                if fn == docname:
                    del self.descrefs[fullname]
            for modname, (fn, _, _, _) in self.modules.items():
                if fn == docname:
                    del self.modules[modname]
            for labelname, (fn, _, _) in self.labels.items():
                if fn == docname:
                    del self.labels[labelname]
            for key, (fn, _) in self.reftargets.items():
                if fn == docname:
                    del self.reftargets[key]
            for key, (fn, _) in self.progoptions.items():
                if fn == docname:
                    del self.progoptions[key]
            # keep version entries, but filter out changes from this document
            for version, changes in self.versionchanges.items():
                new = [change for change in changes if change[1] != docname]
                changes[:] = new
            for fullpath, (docs, _) in self.images.items():
                docs.discard(docname)
                # drop images no document uses anymore
                if not docs:
                    del self.images[fullpath]
|
|
347 |
|
|
348 |
def doc2path(self, docname, base=True, suffix=None):
|
|
349 |
"""
|
|
350 |
Return the filename for the document name.
|
|
351 |
If base is True, return absolute path under self.srcdir.
|
|
352 |
If base is None, return relative path to self.srcdir.
|
|
353 |
If base is a path string, return absolute path under that.
|
|
354 |
If suffix is not None, add it instead of config.source_suffix.
|
|
355 |
"""
|
|
356 |
suffix = suffix or self.config.source_suffix
|
|
357 |
if base is True:
|
|
358 |
return path.join(self.srcdir, docname.replace(SEP, path.sep)) + suffix
|
|
359 |
elif base is None:
|
|
360 |
return docname.replace(SEP, path.sep) + suffix
|
|
361 |
else:
|
|
362 |
return path.join(base, docname.replace(SEP, path.sep)) + suffix
|
|
363 |
|
|
364 |
def find_files(self, config):
|
|
365 |
"""
|
|
366 |
Find all source files in the source dir and put them in self.found_docs.
|
|
367 |
"""
|
|
368 |
exclude_dirs = [d.replace(SEP, path.sep) for d in config.exclude_dirs]
|
|
369 |
exclude_trees = [d.replace(SEP, path.sep) for d in config.exclude_trees]
|
|
370 |
self.found_docs = set(get_matching_docs(
|
|
371 |
self.srcdir, config.source_suffix, exclude_docs=set(config.unused_docs),
|
|
372 |
exclude_dirs=exclude_dirs, exclude_trees=exclude_trees,
|
|
373 |
exclude_dirnames=['_sources'] + config.exclude_dirnames))
|
|
374 |
|
|
375 |
    def get_outdated_files(self, config_changed):
        """
        Return (added, changed, removed) sets of docnames.

        added:   found on disk but never built
        changed: built before, but the doctree is missing or the source (or
                 one of its dependencies) is newer than the last build
        removed: built before, but no longer found on disk
        """
        # clear all files no longer present
        removed = set(self.all_docs) - self.found_docs

        added = set()
        changed = set()

        if config_changed:
            # config values affect e.g. substitutions
            added = self.found_docs
        else:
            for docname in self.found_docs:
                if docname not in self.all_docs:
                    added.add(docname)
                    continue
                # if the doctree file is not there, rebuild
                if not path.isfile(self.doc2path(docname, self.doctreedir,
                                                 '.doctree')):
                    changed.add(docname)
                    continue
                # check the mtime of the document
                mtime = self.all_docs[docname]
                newmtime = path.getmtime(self.doc2path(docname))
                if newmtime > mtime:
                    changed.add(docname)
                    continue
                # finally, check the mtime of dependencies
                for dep in self.dependencies.get(docname, ()):
                    try:
                        # this will do the right thing when dep is absolute too
                        deppath = path.join(self.srcdir, dep)
                        if not path.isfile(deppath):
                            changed.add(docname)
                            break
                        depmtime = path.getmtime(deppath)
                        if depmtime > mtime:
                            changed.add(docname)
                            break
                    except EnvironmentError:
                        # give it another chance
                        changed.add(docname)
                        break

        return added, changed, removed
|
|
422 |
|
|
423 |
    def update(self, config, srcdir, doctreedir, app=None):
        """(Re-)read all files new or changed since last update.  Yields a summary
        and then docnames as it processes them.  Store all environment docnames
        in the canonical format (ie using SEP as a separator in place of
        os.path.sep)."""
        config_changed = False
        if self.config is None:
            # freshly created environment: everything must be read
            msg = '[new config] '
            config_changed = True
        else:
            # check if a config value was changed that affects how doctrees are read
            for key, descr in config.config_values.iteritems():
                # descr[1] flags whether the value affects doctree reading
                if not descr[1]:
                    continue
                if self.config[key] != config[key]:
                    msg = '[config changed] '
                    config_changed = True
                    break
            else:
                msg = ''
            # this value is not covered by the above loop because it is handled
            # specially by the config class
            if self.config.extensions != config.extensions:
                msg = '[extensions changed] '
                config_changed = True
        # the source and doctree directories may have been relocated
        self.srcdir = srcdir
        self.doctreedir = doctreedir
        self.find_files(config)

        added, changed, removed = self.get_outdated_files(config_changed)

        # if files were added or removed, all documents with globbed toctrees
        # must be reread
        if added or removed:
            changed.update(self.glob_toctrees)

        msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
                                                     len(removed))
        # first yield: a human-readable summary for the builder's status output
        yield msg

        self.config = config
        self.app = app

        # clear all files no longer present
        for docname in removed:
            if app:
                app.emit('env-purge-doc', self, docname)
            self.clear_doc(docname)

        # read all new and changed files
        for docname in sorted(added | changed):
            # yield before reading so the builder can display progress
            yield docname
            self.read_doc(docname, app=app)

        if config.master_doc not in self.all_docs:
            self.warn(None, 'master file %s not found' %
                      self.doc2path(config.master_doc))

        # the app object is only valid while update() runs
        self.app = None

        # remove all non-existing images from inventory
        # (.keys() is a list in Python 2, so deletion inside the loop is safe)
        for imgsrc in self.images.keys():
            if not os.access(path.join(self.srcdir, imgsrc), os.R_OK):
                del self.images[imgsrc]

        if app:
            app.emit('env-updated', self)
|
|
491 |
|
|
492 |
|
|
493 |
# --------- SINGLE FILE READING --------------------------------------------
|
|
494 |
|
|
495 |
    def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
        """
        Parse a file and add/update inventory entries for the doctree.
        If srcpath is given, read from a different source file.

        If save_parsed is true, the doctree is pickled to the doctree
        directory and nothing is returned; otherwise the doctree itself is
        returned.
        """
        # remove all inventory entries for that file
        if app:
            app.emit('env-purge-doc', self, docname)
        self.clear_doc(docname)

        if src_path is None:
            src_path = self.doc2path(docname)

        if self.config.default_role:
            # look up the configured default role and install it as the
            # "no role given" role for this parse
            role_fn, messages = roles.role(self.config.default_role, english,
                                           0, dummy_reporter)
            if role_fn:
                roles._roles[''] = role_fn
            else:
                self.warn(docname, 'default role %s not found' %
                          self.config.default_role)

        self.docname = docname
        self.settings['input_encoding'] = self.config.source_encoding

        # source class that lets extensions rewrite the raw source text
        # via the 'source-read' event before it is parsed
        class SphinxSourceClass(FileInput):
            def read(self):
                data = FileInput.read(self)
                if app:
                    # a one-element list, so handlers can modify in place
                    arg = [data]
                    app.emit('source-read', docname, arg)
                    data = arg[0]
                return data

        # publish manually
        pub = Publisher(reader=SphinxStandaloneReader(),
                        writer=SphinxDummyWriter(),
                        source_class=SphinxSourceClass,
                        destination_class=NullOutput)
        pub.set_components(None, 'restructuredtext', None)
        pub.process_programmatic_settings(None, self.settings, None)
        pub.set_source(None, src_path)
        pub.set_destination(None, None)
        try:
            pub.publish()
            doctree = pub.document
        except UnicodeError, err:
            from sphinx.application import SphinxError
            raise SphinxError(err.message)
        # post-process the parsed doctree and fill the inventories
        self.filter_messages(doctree)
        self.process_dependencies(docname, doctree)
        self.process_images(docname, doctree)
        self.process_metadata(docname, doctree)
        self.create_title_from(docname, doctree)
        self.note_labels_from(docname, doctree)
        self.note_indexentries_from(docname, doctree)
        self.note_citations_from(docname, doctree)
        self.build_toc_from(docname, doctree)

        # store time of reading, used to find outdated files
        self.all_docs[docname] = time.time()

        if app:
            app.emit('doctree-read', doctree)

        # make it picklable
        doctree.reporter = None
        doctree.transformer = None
        doctree.settings.warning_stream = None
        doctree.settings.env = None
        doctree.settings.record_dependencies = None
        for metanode in doctree.traverse(MetaBody.meta):
            # docutils' meta nodes aren't picklable because the class is nested
            metanode.__class__ = addnodes.meta

        # cleanup: reset the per-file parsing state
        self.docname = None
        self.currmodule = None
        self.currclass = None
        self.gloss_entries = set()

        if save_parsed:
            # save the parsed doctree
            doctree_filename = self.doc2path(docname, self.doctreedir,
                                             '.doctree')
            dirname = path.dirname(doctree_filename)
            if not path.isdir(dirname):
                os.makedirs(dirname)
            f = open(doctree_filename, 'wb')
            try:
                pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
            finally:
                f.close()
        else:
            return doctree
|
|
589 |
|
|
590 |
def filter_messages(self, doctree):
|
|
591 |
"""
|
|
592 |
Filter system messages from a doctree.
|
|
593 |
"""
|
|
594 |
filterlevel = self.config.keep_warnings and 2 or 5
|
|
595 |
for node in doctree.traverse(nodes.system_message):
|
|
596 |
if node['level'] < filterlevel:
|
|
597 |
node.parent.remove(node)
|
|
598 |
|
|
599 |
def process_dependencies(self, docname, doctree):
|
|
600 |
"""
|
|
601 |
Process docutils-generated dependency info.
|
|
602 |
"""
|
|
603 |
deps = doctree.settings.record_dependencies
|
|
604 |
if not deps:
|
|
605 |
return
|
|
606 |
docdir = path.dirname(self.doc2path(docname, base=None))
|
|
607 |
for dep in deps.list:
|
|
608 |
dep = path.join(docdir, dep)
|
|
609 |
self.dependencies.setdefault(docname, set()).add(dep)
|
|
610 |
|
|
611 |
def process_images(self, docname, doctree):
|
|
612 |
"""
|
|
613 |
Process and rewrite image URIs.
|
|
614 |
"""
|
|
615 |
existing_names = set(v[1] for v in self.images.itervalues())
|
|
616 |
docdir = path.dirname(self.doc2path(docname, base=None))
|
|
617 |
for node in doctree.traverse(nodes.image):
|
|
618 |
# Map the mimetype to the corresponding image. The writer may
|
|
619 |
# choose the best image from these candidates. The special key * is
|
|
620 |
# set if there is only single candiate to be used by a writer.
|
|
621 |
# The special key ? is set for nonlocal URIs.
|
|
622 |
node['candidates'] = candidates = {}
|
|
623 |
imguri = node['uri']
|
|
624 |
if imguri.find('://') != -1:
|
|
625 |
self.warn(docname, 'Nonlocal image URI found: %s' % imguri, node.line)
|
|
626 |
candidates['?'] = imguri
|
|
627 |
continue
|
|
628 |
# imgpath is the image path *from srcdir*
|
|
629 |
imgpath = path.normpath(path.join(docdir, imguri))
|
|
630 |
# set imgpath as default URI
|
|
631 |
node['uri'] = imgpath
|
|
632 |
if imgpath.endswith(os.extsep + '*'):
|
|
633 |
for filename in glob(path.join(self.srcdir, imgpath)):
|
|
634 |
new_imgpath = relative_path(self.srcdir, filename)
|
|
635 |
if filename.lower().endswith('.pdf'):
|
|
636 |
candidates['application/pdf'] = new_imgpath
|
|
637 |
elif filename.lower().endswith('.svg'):
|
|
638 |
candidates['image/svg+xml'] = new_imgpath
|
|
639 |
else:
|
|
640 |
try:
|
|
641 |
f = open(filename, 'rb')
|
|
642 |
try:
|
|
643 |
imgtype = imghdr.what(f)
|
|
644 |
finally:
|
|
645 |
f.close()
|
|
646 |
except (OSError, IOError):
|
|
647 |
self.warn(docname, 'Image file %s not readable' % filename)
|
|
648 |
if imgtype:
|
|
649 |
candidates['image/' + imgtype] = new_imgpath
|
|
650 |
else:
|
|
651 |
candidates['*'] = imgpath
|
|
652 |
# map image paths to unique image names (so that they can be put
|
|
653 |
# into a single directory)
|
|
654 |
for imgpath in candidates.itervalues():
|
|
655 |
self.dependencies.setdefault(docname, set()).add(imgpath)
|
|
656 |
if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
|
|
657 |
self.warn(docname, 'Image file not readable: %s' % imgpath,
|
|
658 |
node.line)
|
|
659 |
if imgpath in self.images:
|
|
660 |
self.images[imgpath][0].add(docname)
|
|
661 |
continue
|
|
662 |
uniquename = path.basename(imgpath)
|
|
663 |
base, ext = path.splitext(uniquename)
|
|
664 |
i = 0
|
|
665 |
while uniquename in existing_names:
|
|
666 |
i += 1
|
|
667 |
uniquename = '%s%s%s' % (base, i, ext)
|
|
668 |
self.images[imgpath] = (set([docname]), uniquename)
|
|
669 |
existing_names.add(uniquename)
|
|
670 |
|
|
671 |
def process_metadata(self, docname, doctree):
|
|
672 |
"""
|
|
673 |
Process the docinfo part of the doctree as metadata.
|
|
674 |
"""
|
|
675 |
self.metadata[docname] = md = {}
|
|
676 |
try:
|
|
677 |
docinfo = doctree[0]
|
|
678 |
except IndexError:
|
|
679 |
# probably an empty document
|
|
680 |
return
|
|
681 |
if docinfo.__class__ is not nodes.docinfo:
|
|
682 |
# nothing to see here
|
|
683 |
return
|
|
684 |
for node in docinfo:
|
|
685 |
if node.__class__ is nodes.author:
|
|
686 |
# handled specially by docutils
|
|
687 |
md['author'] = node.astext()
|
|
688 |
elif node.__class__ is nodes.field:
|
|
689 |
name, body = node
|
|
690 |
md[name.astext()] = body.astext()
|
|
691 |
del doctree[0]
|
|
692 |
|
|
693 |
def create_title_from(self, docname, document):
|
|
694 |
"""
|
|
695 |
Add a title node to the document (just copy the first section title),
|
|
696 |
and store that title in the environment.
|
|
697 |
"""
|
|
698 |
for node in document.traverse(nodes.section):
|
|
699 |
titlenode = nodes.title()
|
|
700 |
visitor = SphinxContentsFilter(document)
|
|
701 |
node[0].walkabout(visitor)
|
|
702 |
titlenode += visitor.get_entry_text()
|
|
703 |
self.titles[docname] = titlenode
|
|
704 |
return
|
|
705 |
|
|
706 |
    def note_labels_from(self, docname, document):
        """Register the explicit targets of *document* in self.labels
        (section/figure labels, with section name) and self.anonlabels
        (all other explicit labels, without one)."""
        for name, explicit in document.nametypes.iteritems():
            # only explicitly given names become labels
            if not explicit:
                continue
            labelid = document.nameids[name]
            if labelid is None:
                continue
            node = document.ids[labelid]
            if name.isdigit() or node.has_key('refuri') or \
               node.tagname.startswith('desc_'):
                # ignore footnote labels, labels automatically generated from a
                # link and description units
                continue
            if name in self.labels:
                self.warn(docname, 'duplicate label %s, ' % name +
                          'other instance in %s' %
                          self.doc2path(self.labels[name][0]),
                          node.line)
            self.anonlabels[name] = docname, labelid
            if node.tagname == 'section':
                sectname = node[0].astext()  # node[0] == title node
            elif node.tagname == 'figure':
                # use the figure's caption (if any) as the section name
                for n in node:
                    if n.tagname == 'caption':
                        sectname = n.astext()
                        break
                else:
                    continue
            else:
                # anonymous-only labels
                continue
            self.labels[name] = docname, labelid, sectname
|
|
737 |
|
|
738 |
def note_indexentries_from(self, docname, document):
|
|
739 |
entries = self.indexentries[docname] = []
|
|
740 |
for node in document.traverse(addnodes.index):
|
|
741 |
entries.extend(node['entries'])
|
|
742 |
|
|
743 |
def note_citations_from(self, docname, document):
|
|
744 |
for node in document.traverse(nodes.citation):
|
|
745 |
label = node[0].astext()
|
|
746 |
if ('citation', label) in self.reftargets:
|
|
747 |
self.warn(docname, 'duplicate citation %s, ' % label +
|
|
748 |
'other instance in %s' % self.doc2path(
|
|
749 |
self.reftargets['citation', label][0]), node.line)
|
|
750 |
self.reftargets['citation', label] = (docname, node['ids'][0])
|
|
751 |
|
|
752 |
    def note_toctree(self, docname, toctreenode):
        """Note a TOC tree directive in a document and gather information about
        file relations from it."""
        if toctreenode['glob']:
            self.glob_toctrees.add(docname)
        includefiles = toctreenode['includefiles']
        for includefile in includefiles:
            # note that if the included file is rebuilt, this one must be
            # too (since the TOC of the included file could have changed)
            self.files_to_rebuild.setdefault(includefile, set()).add(docname)
        self.toctree_includes.setdefault(docname, []).extend(includefiles)
|
|
763 |
|
|
764 |
    def build_toc_from(self, docname, document):
        """Build a TOC from the doctree and store it in the inventory."""
        # mutable one-element list so the nested function can update the
        # entry count (no "nonlocal" in Python 2)
        numentries = [0]  # nonlocal again...

        try:
            # per-file :tocdepth: metadata limits the depth; 0 = unlimited
            maxdepth = int(self.metadata[docname].get('tocdepth', 0))
        except ValueError:
            maxdepth = 0

        def build_toc(node, depth=1):
            # recursively collect TOC entries for the sections in *node*
            entries = []
            for subnode in node:
                if isinstance(subnode, addnodes.toctree):
                    # just copy the toctree node which is then resolved
                    # in self.get_and_resolve_doctree
                    item = subnode.copy()
                    entries.append(item)
                    # do the inventory stuff
                    self.note_toctree(docname, subnode)
                    continue
                if not isinstance(subnode, nodes.section):
                    continue
                title = subnode[0]
                # copy the contents of the section title, but without references
                # and unnecessary stuff
                visitor = SphinxContentsFilter(document)
                title.walkabout(visitor)
                nodetext = visitor.get_entry_text()
                if not numentries[0]:
                    # for the very first toc entry, don't add an anchor
                    # as it is the file's title anyway
                    anchorname = ''
                else:
                    anchorname = '#' + subnode['ids'][0]
                numentries[0] += 1
                reference = nodes.reference('', '', refuri=docname,
                                            anchorname=anchorname,
                                            *nodetext)
                para = addnodes.compact_paragraph('', '', reference)
                item = nodes.list_item('', para)
                # recurse into subsections unless the depth limit is reached
                if maxdepth == 0 or depth < maxdepth:
                    item += build_toc(subnode, depth+1)
                entries.append(item)
            if entries:
                return nodes.bullet_list('', *entries)
            return []
        toc = build_toc(document)
        if toc:
            self.tocs[docname] = toc
        else:
            # store an empty list so lookups never fail
            self.tocs[docname] = nodes.bullet_list('')
        self.toc_num_entries[docname] = numentries[0]
|
|
816 |
|
|
817 |
def get_toc_for(self, docname):
|
|
818 |
"""Return a TOC nodetree -- for use on the same page only!"""
|
|
819 |
toc = self.tocs[docname].deepcopy()
|
|
820 |
for node in toc.traverse(nodes.reference):
|
|
821 |
node['refuri'] = node['anchorname']
|
|
822 |
return toc
|
|
823 |
|
|
824 |
# -------
|
|
825 |
# these are called from docutils directives and therefore use self.docname
|
|
826 |
#
|
|
827 |
def note_descref(self, fullname, desctype, line):
    # Record a description target for *fullname*; warn when the
    # canonical name was already claimed by another document.
    if fullname in self.descrefs:
        otherdoc = self.doc2path(self.descrefs[fullname][0])
        msg = ('duplicate canonical description name %s, ' % fullname +
               'other instance in %s' % otherdoc)
        self.warn(self.docname, msg, line)
    # last writer wins, matching the original behavior
    self.descrefs[fullname] = (self.docname, desctype)
|
|
834 |
|
|
835 |
def note_module(self, modname, synopsis, platform, deprecated):
    # Register the module globally and under its containing document.
    docname = self.docname
    self.modules[modname] = (docname, synopsis, platform, deprecated)
    self.filemodules.setdefault(docname, []).append(modname)
|
|
838 |
|
|
839 |
def note_progoption(self, optname, labelid):
    # Map (current program, option name) to its document and label.
    key = (self.currprogram, optname)
    self.progoptions[key] = (self.docname, labelid)
|
|
841 |
|
|
842 |
def note_reftarget(self, type, name, labelid):
    # Remember a typed cross-reference target keyed by (type, name).
    self.reftargets[(type, name)] = (self.docname, labelid)
|
|
844 |
|
|
845 |
def note_versionchange(self, type, version, node, lineno):
    # Collect versionadded/versionchanged entries, grouped by version.
    entry = (type, self.docname, lineno, self.currmodule,
             self.currdesc, node.astext())
    self.versionchanges.setdefault(version, []).append(entry)
|
|
848 |
|
|
849 |
def note_dependency(self, filename):
    # Resolve *filename* against the current document's directory;
    # path.join simply keeps filename if it is already absolute.
    docdir = path.dirname(self.doc2path(self.docname, base=None))
    self.dependencies.setdefault(self.docname, set()).add(
        path.join(docdir, filename))
|
|
854 |
# -------
|
|
855 |
|
|
856 |
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
|
|
857 |
|
|
858 |
def get_doctree(self, docname):
    """Read the doctree for a file from the pickle and return it.

    The doctree is loaded from the '.doctree' pickle in the doctree
    directory; the environment reference and a fresh Reporter, which
    are not carried by the pickle, are re-attached before returning.
    """
    doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
    f = open(doctree_filename, 'rb')
    try:
        doctree = pickle.load(f)
    finally:
        f.close()
    # reattach the live environment, which is stripped before pickling
    doctree.settings.env = self
    # route reporter output through this environment's warning function;
    # NOTE(review): 2 and 4 are presumably the docutils report and halt
    # levels -- confirm against the docutils Reporter API
    doctree.reporter = Reporter(self.doc2path(docname), 2, 4,
                                stream=RedirStream(self._warnfunc))
    return doctree
|
|
870 |
|
|
871 |
|
|
872 |
def get_and_resolve_doctree(self, docname, builder, doctree=None,
                            prune_toctrees=True):
    """Return the doctree for *docname* with all pending
    cross-references and toctree nodes resolved for *builder*."""
    # load from the pickle cache unless a doctree was handed in
    if doctree is None:
        doctree = self.get_doctree(docname)

    # cross-references first ...
    self.resolve_references(doctree, docname, builder)

    # ... then expand every toctree placeholder in place;
    # unresolvable toctrees are dropped from the tree entirely
    for toctreenode in doctree.traverse(addnodes.toctree):
        resolved = self.resolve_toctree(docname, builder, toctreenode,
                                        prune=prune_toctrees)
        if resolved is None:
            toctreenode.replace_self([])
        else:
            toctreenode.replace_self(resolved)

    return doctree
|
|
892 |
|
|
893 |
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
                    titles_only=False):
    """
    Resolve a *toctree* node into individual bullet lists with titles
    as items, returning None (if no containing titles are found) or
    a new node.

    If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
    to the value of the *maxdepth* option on the *toctree* node.
    If *titles_only* is True, only toplevel document titles will be in the
    resulting tree.
    """

    def _walk_depth(node, depth, maxdepth, titleoverrides):
        """Utility: Cut a TOC at a specified depth."""
        # iterate over a copy, since subnodes may be removed while walking
        for subnode in node.children[:]:
            if isinstance(subnode, (addnodes.compact_paragraph, nodes.list_item)):
                # record the TOC nesting level as a CSS class
                subnode['classes'].append('toctree-l%d' % (depth-1))
                _walk_depth(subnode, depth, maxdepth, titleoverrides)
            elif isinstance(subnode, nodes.bullet_list):
                # a nested bullet list starts the next TOC level;
                # maxdepth == 0 means "no limit"
                if maxdepth > 0 and depth > maxdepth:
                    subnode.parent.replace(subnode, [])
                else:
                    _walk_depth(subnode, depth+1, maxdepth, titleoverrides)

    def _entries_from_toctree(toctreenode, separate=False):
        """Return TOC entries for a toctree node."""
        includefiles = map(str, toctreenode['includefiles'])

        entries = []
        for includefile in includefiles:
            try:
                toc = self.tocs[includefile].deepcopy()
                if not toc.children:
                    # empty toc means: no titles will show up in the toctree
                    self.warn(docname, 'toctree contains reference to document '
                              '%r that doesn\'t have a title: no link will be '
                              'generated' % includefile)
            except KeyError:
                # this is raised if the included file does not exist
                self.warn(docname, 'toctree contains reference to nonexisting '
                          'document %r' % includefile)
            else:
                # if titles_only is given, only keep the main title and
                # sub-toctrees
                if titles_only:
                    # delete everything but the toplevel title(s) and toctrees
                    for toplevel in toc:
                        # nodes with length 1 don't have any children anyway
                        if len(toplevel) > 1:
                            subtoctrees = toplevel.traverse(addnodes.toctree)
                            toplevel[1][:] = subtoctrees
                # resolve all sub-toctrees recursively, splicing their
                # entries in place of the sub-toctree node
                # (NOTE: the loop variable shadows the function argument,
                # which is not used after this point)
                for toctreenode in toc.traverse(addnodes.toctree):
                    i = toctreenode.parent.index(toctreenode) + 1
                    for item in _entries_from_toctree(toctreenode):
                        toctreenode.parent.insert(i, item)
                        i += 1
                    toctreenode.parent.remove(toctreenode)
                if separate:
                    entries.append(toc)
                else:
                    entries.extend(toc.children)
        return entries

    # an explicit maxdepth argument overrides the node's own option
    maxdepth = maxdepth or toctree.get('maxdepth', -1)
    titleoverrides = toctree.get('includetitles', {})

    tocentries = _entries_from_toctree(toctree, separate=True)
    if not tocentries:
        return None

    newnode = addnodes.compact_paragraph('', '', *tocentries)
    newnode['toctree'] = True
    # prune the tree to maxdepth and replace titles, also set level classes
    # (passes 0 when prune is False, which _walk_depth treats as unlimited)
    _walk_depth(newnode, 1, prune and maxdepth or 0, titleoverrides)
    # replace titles, if needed, and set the target paths in the
    # toctrees (they are not known at TOC generation time)
    for refnode in newnode.traverse(nodes.reference):
        refnode['refuri'] = builder.get_relative_uri(
            docname, refnode['refuri']) + refnode['anchorname']
        # title overrides apply only to whole-document links (no anchor);
        # NOTE(review): refuri was already rewritten above, so override
        # keys are matched against builder URIs -- confirm intended
        if titleoverrides and not refnode['anchorname'] \
               and refnode['refuri'] in titleoverrides:
            newtitle = titleoverrides[refnode['refuri']]
            refnode.children = [nodes.Text(newtitle)]
    return newnode
|
|
979 |
|
|
980 |
# Role names that resolve against the description targets collected in
# self.descrefs (Python objects and C-level constructs).
descroles = frozenset(('data', 'exc', 'func', 'class', 'const', 'attr', 'obj',
                       'meth', 'cfunc', 'cmember', 'cdata', 'ctype', 'cmacro'))
|
|
982 |
|
|
983 |
def resolve_references(self, doctree, fromdocname, builder):
    """Resolve all pending_xref nodes in *doctree* (belonging to document
    *fromdocname*) into concrete reference nodes for *builder*.

    Each pending_xref is replaced by a reference node, by its bare
    content (when the target cannot be found), or by a system_message
    (for undefined :ref: labels).
    """
    reftarget_roles = set(('token', 'term', 'citation'))
    # add all custom xref types too
    reftarget_roles.update(i[0] for i in additional_xref_types.values())

    for node in doctree.traverse(addnodes.pending_xref):
        # the fallback content if the target cannot be resolved
        contnode = node[0].deepcopy()
        newnode = None

        typ = node['reftype']
        target = node['reftarget']

        try:
            if typ == 'ref':
                if node['refcaption']:
                    # reference to anonymous label; the reference uses the supplied
                    # link caption
                    docname, labelid = self.anonlabels.get(target, ('',''))
                    sectname = node.astext()
                    if not docname:
                        newnode = doctree.reporter.system_message(
                            2, 'undefined label: %s' % target)
                else:
                    # reference to the named label; the final node will contain the
                    # section name after the label
                    docname, labelid, sectname = self.labels.get(target, ('','',''))
                    if not docname:
                        newnode = doctree.reporter.system_message(
                            2, 'undefined label: %s -- if you don\'t ' % target +
                            'give a link caption the label must precede a section '
                            'header.')
                if docname:
                    newnode = nodes.reference('', '')
                    innernode = nodes.emphasis(sectname, sectname)
                    if docname == fromdocname:
                        newnode['refid'] = labelid
                    else:
                        # set more info in contnode in case the get_relative_uri call
                        # raises NoUri, the builder will then have to resolve these
                        contnode = addnodes.pending_xref('')
                        contnode['refdocname'] = docname
                        contnode['refsectname'] = sectname
                        newnode['refuri'] = builder.get_relative_uri(
                            fromdocname, docname)
                        if labelid:
                            newnode['refuri'] += '#' + labelid
                    newnode.append(innernode)
            elif typ == 'keyword':
                # keywords are referenced by named labels
                docname, labelid, _ = self.labels.get(target, ('','',''))
                if not docname:
                    #self.warn(fromdocname, 'unknown keyword: %s' % target)
                    newnode = contnode
                else:
                    newnode = nodes.reference('', '')
                    if docname == fromdocname:
                        newnode['refid'] = labelid
                    else:
                        newnode['refuri'] = builder.get_relative_uri(
                            fromdocname, docname) + '#' + labelid
                    newnode.append(contnode)
            elif typ == 'option':
                # program options registered via note_progoption()
                progname = node['refprogram']
                docname, labelid = self.progoptions.get((progname, target), ('', ''))
                if not docname:
                    newnode = contnode
                else:
                    newnode = nodes.reference('', '')
                    if docname == fromdocname:
                        newnode['refid'] = labelid
                    else:
                        newnode['refuri'] = builder.get_relative_uri(
                            fromdocname, docname) + '#' + labelid
                    newnode.append(contnode)
            elif typ in reftarget_roles:
                # targets registered via note_reftarget()
                docname, labelid = self.reftargets.get((typ, target), ('', ''))
                if not docname:
                    # only term/citation misses are worth a warning
                    if typ == 'term':
                        self.warn(fromdocname, 'term not in glossary: %s' % target,
                                  node.line)
                    elif typ == 'citation':
                        self.warn(fromdocname, 'citation not found: %s' % target,
                                  node.line)
                    newnode = contnode
                else:
                    newnode = nodes.reference('', '')
                    if docname == fromdocname:
                        newnode['refid'] = labelid
                    else:
                        newnode['refuri'] = builder.get_relative_uri(
                            fromdocname, docname, typ) + '#' + labelid
                    newnode.append(contnode)
            elif typ == 'mod':
                docname, synopsis, platform, deprecated = \
                    self.modules.get(target, ('','','', ''))
                if not docname:
                    # give extensions a chance to resolve it
                    newnode = builder.app.emit_firstresult('missing-reference',
                                                           self, node, contnode)
                    if not newnode:
                        newnode = contnode
                elif docname == fromdocname:
                    # don't link to self
                    newnode = contnode
                else:
                    newnode = nodes.reference('', '')
                    newnode['refuri'] = builder.get_relative_uri(
                        fromdocname, docname) + '#module-' + target
                    newnode['reftitle'] = '%s%s%s' % (
                        (platform and '(%s) ' % platform),
                        synopsis, (deprecated and ' (deprecated)' or ''))
                    newnode.append(contnode)
            elif typ in self.descroles:
                # "descrefs"
                modname = node['modname']
                clsname = node['classname']
                searchorder = node.hasattr('refspecific') and 1 or 0
                name, desc = self.find_desc(modname, clsname,
                                            target, typ, searchorder)
                if not desc:
                    # give extensions a chance to resolve it
                    newnode = builder.app.emit_firstresult('missing-reference',
                                                           self, node, contnode)
                    if not newnode:
                        newnode = contnode
                else:
                    newnode = nodes.reference('', '')
                    if desc[0] == fromdocname:
                        newnode['refid'] = name
                    else:
                        newnode['refuri'] = (
                            builder.get_relative_uri(fromdocname, desc[0])
                            + '#' + name)
                    newnode['reftitle'] = name
                    newnode.append(contnode)
            else:
                raise RuntimeError('unknown xfileref node encountered: %s' % node)
        except NoUri:
            # the builder cannot express this link; keep the bare content
            newnode = contnode
        if newnode:
            node.replace_self(newnode)

    # allow custom references to be resolved
    builder.app.emit('doctree-resolved', doctree, fromdocname)
|
|
1125 |
|
|
1126 |
def create_index(self, builder, _fixre=re.compile(r'(.*) ([(][^()]*[)])')):
    """Create the real index from the collected index entries.

    Returns a list of (letter, entries) pairs, where each entry is
    (word, [target URIs, {subword -> same structure}]) -- the subword
    dicts are flattened to sorted lists by keyfunc below.
    """
    new = {}

    def add_entry(word, subword, dic=new):
        # insert *word* (and optionally *subword* beneath it); the target
        # URI comes from the enclosing loop's fn/tid variables
        entry = dic.get(word)
        if not entry:
            dic[word] = entry = [[], {}]
        if subword:
            add_entry(subword, '', dic=entry[1])
        else:
            try:
                entry[0].append(builder.get_relative_uri('genindex', fn)
                                + '#' + tid)
            except NoUri:
                # builder cannot produce a URI for this page; skip target
                pass

    for fn, entries in self.indexentries.iteritems():
        # new entry types must be listed in directives/other.py!
        for type, string, tid, alias in entries:
            if type == 'single':
                # "entry" or "entry; subentry"
                try:
                    entry, subentry = string.split(';', 1)
                except ValueError:
                    entry, subentry = string, ''
                if not entry:
                    self.warn(fn, 'invalid index entry %r' % string)
                    continue
                add_entry(entry.strip(), subentry.strip())
            elif type == 'pair':
                # "first; second" -- indexed under both orders
                try:
                    first, second = map(lambda x: x.strip(),
                                        string.split(';', 1))
                    if not first or not second:
                        raise ValueError
                except ValueError:
                    self.warn(fn, 'invalid pair index entry %r' % string)
                    continue
                add_entry(first, second)
                add_entry(second, first)
            elif type == 'triple':
                # "first; second; third" -- indexed under all rotations
                try:
                    first, second, third = map(lambda x: x.strip(),
                                               string.split(';', 2))
                    if not first or not second or not third:
                        raise ValueError
                except ValueError:
                    self.warn(fn, 'invalid triple index entry %r' % string)
                    continue
                add_entry(first, second+' '+third)
                add_entry(second, third+', '+first)
                add_entry(third, first+' '+second)
            else:
                self.warn(fn, 'unknown index entry type %r' % type)

    newlist = new.items()
    newlist.sort(key=lambda t: t[0].lower())

    # fixup entries: transform
    #   func() (in module foo)
    #   func() (in module bar)
    # into
    #   func()
    #     (in module foo)
    #     (in module bar)
    oldkey = ''
    oldsubitems = None
    i = 0
    while i < len(newlist):
        key, (targets, subitems) = newlist[i]
        # cannot move if it has subitems; structure gets too complex
        if not subitems:
            m = _fixre.match(key)
            if m:
                if oldkey == m.group(1):
                    # prefixes match: add entry as subitem of the previous entry
                    oldsubitems.setdefault(m.group(2), [[], {}])[0].extend(targets)
                    del newlist[i]
                    continue
                oldkey = m.group(1)
            else:
                oldkey = key
        oldsubitems = subitems
        i += 1

    # group the entries by letter
    # (Python 2 tuple-parameter syntax; not valid on Python 3)
    def keyfunc((k, v), ltrs=uppercase+'_'):
        # hack: mutate the subitems dicts to a list in the keyfunc
        v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems())
        # now calculate the key
        letter = k[0].upper()
        if letter in ltrs:
            return letter
        else:
            # get all other symbols under one heading
            return 'Symbols'
    return [(key, list(group)) for (key, group) in groupby(newlist, keyfunc)]
|
|
1223 |
|
|
1224 |
def collect_relations(self):
    """Build the document relation table from the toctree structure.

    Returns a dict mapping each docname (reachable from the master
    document) to [parent, previous, next] docnames; entries that do
    not exist are None.
    """
    relations = {}
    getinc = self.toctree_includes.get
    def collect(parents, docname, previous, next):
        # *parents* is a list of (docname, index-within-parent) pairs,
        # innermost first; *previous*/*next* are the siblings, or None
        includes = getinc(docname)
        # previous
        if not previous:
            # if no previous sibling, go to parent
            previous = parents[0][0]
        else:
            # else, go to previous sibling, or if it has children, to
            # the last of its children, or if that has children, to the
            # last of those, and so forth
            while 1:
                previncs = getinc(previous)
                if previncs:
                    previous = previncs[-1]
                else:
                    break
        # next
        if includes:
            # if it has children, go to first of them
            next = includes[0]
        elif next:
            # else, if next sibling, go to it
            pass
        else:
            # else, go to the next sibling of the parent, if present,
            # else the grandparent's sibling, if present, and so forth
            for parname, parindex in parents:
                parincs = getinc(parname)
                if parincs and parindex + 1 < len(parincs):
                    next = parincs[parindex+1]
                    break
            # else it will stay None
        # same for children
        if includes:
            # recurse into every included document, pairing it with its
            # immediate siblings (None at the ends)
            for subindex, args in enumerate(izip(includes, [None] + includes,
                                                 includes[1:] + [None])):
                collect([(docname, subindex)] + parents, *args)
        relations[docname] = [parents[0][0], previous, next]
    # the master document is the root and has no parent or siblings
    collect([(None, 0)], self.config.master_doc, None, None)
    return relations
|
|
1267 |
|
|
1268 |
def check_consistency(self):
    """Do consistency checks."""
    master = self.config.master_doc
    for docname in sorted(self.all_docs):
        # documents referenced by some toctree are fine
        if docname in self.files_to_rebuild:
            continue
        # the master file is not included anywhere ;)
        if docname == master:
            continue
        self.warn(docname, 'document isn\'t included in any toctree')
|
|
1277 |
|
|
1278 |
# --------- QUERYING -------------------------------------------------------
|
|
1279 |
|
|
1280 |
def find_desc(self, modname, classname, name, type, searchorder=0):
    """Find a description node matching "name", optionally qualified
    with the given module and/or class name; return a tuple
    (fullname, descref) or (None, None) if nothing matches."""
    # strip call parentheses, e.g. "foo()" -> "foo"
    if name.endswith('()'):
        name = name[:-2]
    if not name:
        return None, None

    descrefs = self.descrefs

    # C constructs are never qualified with module/class names
    # ('class' and 'const' start with 'c' but are Python types)
    if type[0] == 'c' and type not in ('class', 'const'):
        # drop trailing pointer stars and whitespace
        name = name.rstrip(' *')
        if name in descrefs and descrefs[name][1][0] == 'c':
            return name, descrefs[name]
        return None, None

    # assemble the candidate names in lookup order
    qualified = None
    if modname and classname:
        qualified = modname + '.' + classname + '.' + name
    moduled = modname and modname + '.' + name or None
    if searchorder == 1:
        # "refspecific" targets: most specific name first
        candidates = [qualified, moduled, name]
    else:
        candidates = [name, moduled, qualified]
        if '.' not in name:
            # special case: builtin exceptions have module "exceptions" set
            if type == 'exc':
                candidates.append('exceptions.' + name)
            # special case: object methods
            elif type in ('func', 'meth'):
                candidates.append('object.' + name)

    for candidate in candidates:
        if candidate and candidate in descrefs:
            return candidate, descrefs[candidate]
    return None, None
|
|
1326 |
|
|
1327 |
def find_keyword(self, keyword, avoid_fuzzy=False, cutoff=0.6, n=20):
    """
    Find keyword matches for a keyword. If there's an exact match, just return
    it, else return a list of fuzzy matches if avoid_fuzzy isn't True.

    Keywords searched are: first modules, then descrefs.

    Returns: None if nothing found
             (type, docname, anchorname) if exact match found
             list of (quality, type, docname, anchorname, description) if fuzzy

    *cutoff* is the minimal difflib ratio for a fuzzy match, *n* the
    maximal number of fuzzy matches returned.
    """

    # exact matches: modules take precedence over descriptions
    if keyword in self.modules:
        docname, title, system, deprecated = self.modules[keyword]
        return 'module', docname, 'module-' + keyword
    if keyword in self.descrefs:
        docname, ref_type = self.descrefs[keyword]
        return ref_type, docname, keyword
    # special cases
    if '.' not in keyword:
        # exceptions are documented in the exceptions module
        if 'exceptions.'+keyword in self.descrefs:
            docname, ref_type = self.descrefs['exceptions.'+keyword]
            return ref_type, docname, 'exceptions.'+keyword
        # special methods are documented as object methods
        if 'object.'+keyword in self.descrefs:
            docname, ref_type = self.descrefs['object.'+keyword]
            return ref_type, docname, 'object.'+keyword

    if avoid_fuzzy:
        # implicit None: no exact match found
        return

    # find fuzzy matches
    s = difflib.SequenceMatcher()
    s.set_seq2(keyword.lower())

    def possibilities():
        # all candidate (type, docname, anchorname, description) tuples
        for title, (fn, desc, _, _) in self.modules.iteritems():
            yield ('module', fn, 'module-'+title, desc)
        for title, (fn, desctype) in self.descrefs.iteritems():
            yield (desctype, fn, title, '')

    def dotsearch(string):
        # yield all dotted suffixes, e.g. "a.b.c", "b.c", "c"
        parts = string.lower().split('.')
        for idx in xrange(0, len(parts)):
            yield '.'.join(parts[idx:])

    result = []
    for type, docname, title, desc in possibilities():
        best_res = 0
        for part in dotsearch(title):
            s.set_seq1(part)
            # cheap upper-bound checks first, exact ratio last
            if s.real_quick_ratio() >= cutoff and \
               s.quick_ratio() >= cutoff and \
               s.ratio() >= cutoff and \
               s.ratio() > best_res:
                best_res = s.ratio()
        if best_res:
            result.append((best_res, type, docname, title, desc))

    # best *n* matches, highest quality first
    return heapq.nlargest(n, result)
|