symbian-qemu-0.9.1-12/python-2.6.1/Lib/difflib.py
       
     1 #! /usr/bin/env python
       
     2 
       
     3 """
       
     4 Module difflib -- helpers for computing deltas between objects.
       
     5 
       
     6 Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
       
     7     Use SequenceMatcher to return list of the best "good enough" matches.
       
     8 
       
     9 Function context_diff(a, b):
       
    10     For two lists of strings, return a delta in context diff format.
       
    11 
       
    12 Function ndiff(a, b):
       
    13     Return a delta: the difference between `a` and `b` (lists of strings).
       
    14 
       
    15 Function restore(delta, which):
       
    16     Return one of the two sequences that generated an ndiff delta.
       
    17 
       
    18 Function unified_diff(a, b):
       
    19     For two lists of strings, return a delta in unified diff format.
       
    20 
       
    21 Class SequenceMatcher:
       
    22     A flexible class for comparing pairs of sequences of any type.
       
    23 
       
    24 Class Differ:
       
    25     For producing human-readable deltas from sequences of lines of text.
       
    26 
       
    27 Class HtmlDiff:
       
    28     For producing HTML side by side comparison with change highlights.
       
    29 """
       
    30 
       
    31 __all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
       
    32            'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
       
    33            'unified_diff', 'HtmlDiff', 'Match']
       
    34 
       
    35 import heapq
       
    36 from collections import namedtuple as _namedtuple
       
    37 from functools import reduce
       
    38 
       
    39 Match = _namedtuple('Match', 'a b size')
       
    40 
       
    41 def _calculate_ratio(matches, length):
       
    42     if length:
       
    43         return 2.0 * matches / length
       
    44     return 1.0
       
    45 
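# Illustrative arithmetic (a sketch, not in the original source): for the
# doctest sequences "abcd" and "bcde" used below, the matching blocks cover
# M == 3 elements ("bcd") and the two sequences hold T == 8 elements in
# total, so _calculate_ratio(3, 8) == 2.0 * 3 / 8 == 0.75, the value the
# doctests report for s.ratio().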
       
    46 class SequenceMatcher:
       
    47 
       
    48     """
       
    49     SequenceMatcher is a flexible class for comparing pairs of sequences of
       
    50     any type, so long as the sequence elements are hashable.  The basic
       
    51     algorithm predates, and is a little fancier than, an algorithm
       
    52     published in the late 1980's by Ratcliff and Obershelp under the
       
    53     hyperbolic name "gestalt pattern matching".  The basic idea is to find
       
    54     the longest contiguous matching subsequence that contains no "junk"
       
    55     elements (R-O doesn't address junk).  The same idea is then applied
       
    56     recursively to the pieces of the sequences to the left and to the right
       
    57     of the matching subsequence.  This does not yield minimal edit
       
    58     sequences, but does tend to yield matches that "look right" to people.
       
    59 
       
    60     SequenceMatcher tries to compute a "human-friendly diff" between two
       
    61     sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
       
    62     longest *contiguous* & junk-free matching subsequence.  That's what
       
    63     catches peoples' eyes.  The Windows(tm) windiff has another interesting
       
    64     notion, pairing up elements that appear uniquely in each sequence.
       
    65     That, and the method here, appear to yield more intuitive difference
       
    66     reports than does diff.  This method appears to be the least vulnerable
       
    67     to synching up on blocks of "junk lines", though (like blank lines in
       
    68     ordinary text files, or maybe "<P>" lines in HTML files).  That may be
       
    69     because this is the only method of the 3 that has a *concept* of
       
    70     "junk" <wink>.
       
    71 
       
    72     Example, comparing two strings, and considering blanks to be "junk":
       
    73 
       
    74     >>> s = SequenceMatcher(lambda x: x == " ",
       
    75     ...                     "private Thread currentThread;",
       
    76     ...                     "private volatile Thread currentThread;")
       
    77     >>>
       
    78 
       
    79     .ratio() returns a float in [0, 1], measuring the "similarity" of the
       
    80     sequences.  As a rule of thumb, a .ratio() value over 0.6 means the
       
    81     sequences are close matches:
       
    82 
       
    83     >>> print round(s.ratio(), 3)
       
    84     0.866
       
    85     >>>
       
    86 
       
    87     If you're only interested in where the sequences match,
       
    88     .get_matching_blocks() is handy:
       
    89 
       
    90     >>> for block in s.get_matching_blocks():
       
    91     ...     print "a[%d] and b[%d] match for %d elements" % block
       
    92     a[0] and b[0] match for 8 elements
       
    93     a[8] and b[17] match for 21 elements
       
    94     a[29] and b[38] match for 0 elements
       
    95 
       
    96     Note that the last tuple returned by .get_matching_blocks() is always a
       
    97     dummy, (len(a), len(b), 0), and this is the only case in which the last
       
    98     tuple element (number of elements matched) is 0.
       
    99 
       
   100     If you want to know how to change the first sequence into the second,
       
   101     use .get_opcodes():
       
   102 
       
   103     >>> for opcode in s.get_opcodes():
       
   104     ...     print "%6s a[%d:%d] b[%d:%d]" % opcode
       
   105      equal a[0:8] b[0:8]
       
   106     insert a[8:8] b[8:17]
       
   107      equal a[8:29] b[17:38]
       
   108 
       
   109     See the Differ class for a fancy human-friendly file differencer, which
       
   110     uses SequenceMatcher both to compare sequences of lines, and to compare
       
   111     sequences of characters within similar (near-matching) lines.
       
   112 
       
   113     See also function get_close_matches() in this module, which shows how
       
   114     simple code building on SequenceMatcher can be used to do useful work.
       
   115 
       
   116     Timing:  Basic R-O is cubic time worst case and quadratic time expected
       
   117     case.  SequenceMatcher is quadratic time for the worst case and has
       
   118     expected-case behavior dependent in a complicated way on how many
       
   119     elements the sequences have in common; best case time is linear.
       
   120 
       
   121     Methods:
       
   122 
       
   123     __init__(isjunk=None, a='', b='')
       
   124         Construct a SequenceMatcher.
       
   125 
       
   126     set_seqs(a, b)
       
   127         Set the two sequences to be compared.
       
   128 
       
   129     set_seq1(a)
       
   130         Set the first sequence to be compared.
       
   131 
       
   132     set_seq2(b)
       
   133         Set the second sequence to be compared.
       
   134 
       
   135     find_longest_match(alo, ahi, blo, bhi)
       
   136         Find longest matching block in a[alo:ahi] and b[blo:bhi].
       
   137 
       
   138     get_matching_blocks()
       
   139         Return list of triples describing matching subsequences.
       
   140 
       
   141     get_opcodes()
       
   142         Return list of 5-tuples describing how to turn a into b.
       
   143 
       
   144     ratio()
       
   145         Return a measure of the sequences' similarity (float in [0,1]).
       
   146 
       
   147     quick_ratio()
       
   148         Return an upper bound on .ratio() relatively quickly.
       
   149 
       
   150     real_quick_ratio()
       
   151         Return an upper bound on ratio() very quickly.
       
   152     """
       
   153 
       
   154     def __init__(self, isjunk=None, a='', b=''):
       
   155         """Construct a SequenceMatcher.
       
   156 
       
   157         Optional arg isjunk is None (the default), or a one-argument
       
   158         function that takes a sequence element and returns true iff the
       
   159         element is junk.  None is equivalent to passing "lambda x: 0", i.e.
       
   160         no elements are considered to be junk.  For example, pass
       
   161             lambda x: x in " \\t"
       
   162         if you're comparing lines as sequences of characters, and don't
       
   163         want to synch up on blanks or hard tabs.
       
   164 
       
   165         Optional arg a is the first of two sequences to be compared.  By
       
   166         default, an empty string.  The elements of a must be hashable.  See
       
   167         also .set_seqs() and .set_seq1().
       
   168 
       
   169         Optional arg b is the second of two sequences to be compared.  By
       
   170         default, an empty string.  The elements of b must be hashable. See
       
   171         also .set_seqs() and .set_seq2().
       
   172         """
       
   173 
       
   174         # Members:
       
   175         # a
       
   176         #      first sequence
       
   177         # b
       
   178         #      second sequence; differences are computed as "what do
       
   179         #      we need to do to 'a' to change it into 'b'?"
       
   180         # b2j
       
   181         #      for x in b, b2j[x] is a list of the indices (into b)
       
   182         #      at which x appears; junk elements do not appear
       
   183         # fullbcount
       
   184         #      for x in b, fullbcount[x] == the number of times x
       
   185         #      appears in b; only materialized if really needed (used
       
   186         #      only for computing quick_ratio())
       
   187         # matching_blocks
       
   188         #      a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
       
   189         #      ascending & non-overlapping in i and in j; terminated by
       
   190         #      a dummy (len(a), len(b), 0) sentinel
       
   191         # opcodes
       
   192         #      a list of (tag, i1, i2, j1, j2) tuples, where tag is
       
   193         #      one of
       
   194         #          'replace'   a[i1:i2] should be replaced by b[j1:j2]
       
   195         #          'delete'    a[i1:i2] should be deleted
       
   196         #          'insert'    b[j1:j2] should be inserted
       
   197         #          'equal'     a[i1:i2] == b[j1:j2]
       
   198         # isjunk
       
   199         #      a user-supplied function taking a sequence element and
       
   200         #      returning true iff the element is "junk" -- this has
       
   201         #      subtle but helpful effects on the algorithm, which I'll
       
   202         #      get around to writing up someday <0.9 wink>.
       
   203         #      DON'T USE!  Only __chain_b uses this.  Use isbjunk.
       
   204         # isbjunk
       
   205         #      for x in b, isbjunk(x) == isjunk(x) but much faster;
       
   206         #      it's really the __contains__ method of a hidden dict.
       
   207         #      DOES NOT WORK for x in a!
       
   208         # isbpopular
       
   209         #      for x in b, isbpopular(x) is true iff b is reasonably long
       
   210         #      (at least 200 elements) and x accounts for more than 1% of
       
   211         #      its elements.  DOES NOT WORK for x in a!
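        #
        # Illustrative sketch (not in the original comments): for a small
        # junk-free b = "abcb", __chain_b() leaves
        #          b2j == {'a': [0], 'b': [1, 3], 'c': [2]}
        # and quick_ratio(), if ever called, would materialize
        #          fullbcount == {'a': 1, 'b': 2, 'c': 1}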
       
   212 
       
   213         self.isjunk = isjunk
       
   214         self.a = self.b = None
       
   215         self.set_seqs(a, b)
       
   216 
       
   217     def set_seqs(self, a, b):
       
   218         """Set the two sequences to be compared.
       
   219 
       
   220         >>> s = SequenceMatcher()
       
   221         >>> s.set_seqs("abcd", "bcde")
       
   222         >>> s.ratio()
       
   223         0.75
       
   224         """
       
   225 
       
   226         self.set_seq1(a)
       
   227         self.set_seq2(b)
       
   228 
       
   229     def set_seq1(self, a):
       
   230         """Set the first sequence to be compared.
       
   231 
       
   232         The second sequence to be compared is not changed.
       
   233 
       
   234         >>> s = SequenceMatcher(None, "abcd", "bcde")
       
   235         >>> s.ratio()
       
   236         0.75
       
   237         >>> s.set_seq1("bcde")
       
   238         >>> s.ratio()
       
   239         1.0
       
   240         >>>
       
   241 
       
   242         SequenceMatcher computes and caches detailed information about the
       
   243         second sequence, so if you want to compare one sequence S against
       
   244         many sequences, use .set_seq2(S) once and call .set_seq1(x)
       
   245         repeatedly for each of the other sequences.
       
   246 
       
   247         See also set_seqs() and set_seq2().
       
   248         """
       
   249 
       
   250         if a is self.a:
       
   251             return
       
   252         self.a = a
       
   253         self.matching_blocks = self.opcodes = None
       
   254 
       
   255     def set_seq2(self, b):
       
   256         """Set the second sequence to be compared.
       
   257 
       
   258         The first sequence to be compared is not changed.
       
   259 
       
   260         >>> s = SequenceMatcher(None, "abcd", "bcde")
       
   261         >>> s.ratio()
       
   262         0.75
       
   263         >>> s.set_seq2("abcd")
       
   264         >>> s.ratio()
       
   265         1.0
       
   266         >>>
       
   267 
       
   268         SequenceMatcher computes and caches detailed information about the
       
   269         second sequence, so if you want to compare one sequence S against
       
   270         many sequences, use .set_seq2(S) once and call .set_seq1(x)
       
   271         repeatedly for each of the other sequences.
       
   272 
       
   273         See also set_seqs() and set_seq1().
       
   274         """
       
   275 
       
   276         if b is self.b:
       
   277             return
       
   278         self.b = b
       
   279         self.matching_blocks = self.opcodes = None
       
   280         self.fullbcount = None
       
   281         self.__chain_b()
       
   282 
       
   283     # For each element x in b, set b2j[x] to a list of the indices in
       
   284     # b where x appears; the indices are in increasing order; note that
       
   285     # the number of times x appears in b is len(b2j[x]) ...
       
   286     # when self.isjunk is defined, junk elements don't show up in this
       
   287     # map at all, which stops the central find_longest_match method
       
   288     # from starting any matching block at a junk element ...
       
   289     # also creates the fast isbjunk function ...
       
   290     # b2j also does not contain entries for "popular" elements, meaning
       
   291     # elements that account for more than 1% of the total elements, and
       
   292     # when the sequence is reasonably large (>= 200 elements); this can
       
   293     # be viewed as an adaptive notion of semi-junk, and yields an enormous
       
   294     # speedup when, e.g., comparing program files with hundreds of
       
   295     # instances of "return NULL;" ...
       
   296     # note that this is only called when b changes; so for cross-product
       
   297     # kinds of matches, it's best to call set_seq2 once, then set_seq1
       
   298     # repeatedly
       
   299 
       
   300     def __chain_b(self):
       
   301         # Because isjunk is a user-defined (not C) function, and we test
       
   302         # for junk a LOT, it's important to minimize the number of calls.
       
   303         # Before the tricks described here, __chain_b was by far the most
       
   304         # time-consuming routine in the whole module!  If anyone sees
       
   305         # Jim Roskind, thank him again for profile.py -- I never would
       
   306         # have guessed that.
       
   307         # The first trick is to build b2j ignoring the possibility
       
   308         # of junk.  I.e., we don't call isjunk at all yet.  Throwing
       
   309         # out the junk later is much cheaper than building b2j "right"
       
   310         # from the start.
       
   311         b = self.b
       
   312         n = len(b)
       
   313         self.b2j = b2j = {}
       
   314         populardict = {}
       
   315         for i, elt in enumerate(b):
       
   316             if elt in b2j:
       
   317                 indices = b2j[elt]
       
   318                 if n >= 200 and len(indices) * 100 > n:
       
   319                     populardict[elt] = 1
       
   320                     del indices[:]
       
   321                 else:
       
   322                     indices.append(i)
       
   323             else:
       
   324                 b2j[elt] = [i]
       
   325 
       
   326         # Purge leftover indices for popular elements.
       
   327         for elt in populardict:
       
   328             del b2j[elt]
       
   329 
       
   330         # Now b2j.keys() contains elements uniquely, and especially when
       
   331         # the sequence is a string, that's usually a good deal smaller
       
   332         # than len(string).  The difference is the number of isjunk calls
       
   333         # saved.
       
   334         isjunk = self.isjunk
       
   335         junkdict = {}
       
   336         if isjunk:
       
   337             for d in populardict, b2j:
       
   338                 for elt in d.keys():
       
   339                     if isjunk(elt):
       
   340                         junkdict[elt] = 1
       
   341                         del d[elt]
       
   342 
       
   343         # Now for x in b, isjunk(x) == x in junkdict, but the
       
   344         # latter is much faster.  Note too that while there may be a
       
   345         # lot of junk in the sequence, the number of *unique* junk
       
   346         # elements is probably small.  So the memory burden of keeping
       
   347         # this dict alive is likely trivial compared to the size of b2j.
       
   348         self.isbjunk = junkdict.__contains__
       
   349         self.isbpopular = populardict.__contains__
       
   350 
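    # Illustrative note on __chain_b (a sketch, not in the original source):
    # with b = "x" * 250 and no isjunk, 'x' soon accounts for more than 1%
    # of a sequence of >= 200 elements, so it is classified "popular" and
    # purged from b2j entirely:
    #     b2j == {},  isbpopular('x') is True,  isbjunk('x') is False
    # and the inner loop of find_longest_match() will therefore never start
    # a matching block at an 'x'.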
       
   351     def find_longest_match(self, alo, ahi, blo, bhi):
       
   352         """Find longest matching block in a[alo:ahi] and b[blo:bhi].
       
   353 
       
   354         If isjunk is not defined:
       
   355 
       
   356         Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
       
   357             alo <= i <= i+k <= ahi
       
   358             blo <= j <= j+k <= bhi
       
   359         and for all (i',j',k') meeting those conditions,
       
   360             k >= k'
       
   361             i <= i'
       
   362             and if i == i', j <= j'
       
   363 
       
   364         In other words, of all maximal matching blocks, return one that
       
   365         starts earliest in a, and of all those maximal matching blocks that
       
   366         start earliest in a, return the one that starts earliest in b.
       
   367 
       
   368         >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
       
   369         >>> s.find_longest_match(0, 5, 0, 9)
       
   370         Match(a=0, b=4, size=5)
       
   371 
       
   372         If isjunk is defined, first the longest matching block is
       
   373         determined as above, but with the additional restriction that no
       
   374         junk element appears in the block.  Then that block is extended as
       
   375         far as possible by matching (only) junk elements on both sides.  So
       
   376         the resulting block never matches on junk except as identical junk
       
   377         happens to be adjacent to an "interesting" match.
       
   378 
       
   379         Here's the same example as before, but considering blanks to be
       
   380         junk.  That prevents " abcd" from matching the " abcd" at the tail
       
   381         end of the second sequence directly.  Instead only the "abcd" can
       
   382         match, and matches the leftmost "abcd" in the second sequence:
       
   383 
       
   384         >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
       
   385         >>> s.find_longest_match(0, 5, 0, 9)
       
   386         Match(a=1, b=0, size=4)
       
   387 
       
   388         If no blocks match, return (alo, blo, 0).
       
   389 
       
   390         >>> s = SequenceMatcher(None, "ab", "c")
       
   391         >>> s.find_longest_match(0, 2, 0, 1)
       
   392         Match(a=0, b=0, size=0)
       
   393         """
       
   394 
       
   395         # CAUTION:  stripping common prefix or suffix would be incorrect.
       
   396         # E.g.,
       
   397         #    ab
       
   398         #    acab
       
   399         # Longest matching block is "ab", but if common prefix is
       
   400         # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
       
   401         # strip, so ends up claiming that ab is changed to acab by
       
   402         # inserting "ca" in the middle.  That's minimal but unintuitive:
       
   403         # "it's obvious" that someone inserted "ac" at the front.
       
   404         # Windiff ends up at the same place as diff, but by pairing up
       
   405         # the unique 'b's and then matching the first two 'a's.
       
   406 
       
   407         a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
       
   408         besti, bestj, bestsize = alo, blo, 0
       
   409         # find longest junk-free match
       
   410         # during an iteration of the loop, j2len[j] = length of longest
       
   411         # junk-free match ending with a[i-1] and b[j]
       
   412         j2len = {}
       
   413         nothing = []
       
   414         for i in xrange(alo, ahi):
       
   415             # look at all instances of a[i] in b; note that because
       
   416             # b2j has no junk keys, the loop is skipped if a[i] is junk
       
   417             j2lenget = j2len.get
       
   418             newj2len = {}
       
   419             for j in b2j.get(a[i], nothing):
       
   420                 # a[i] matches b[j]
       
   421                 if j < blo:
       
   422                     continue
       
   423                 if j >= bhi:
       
   424                     break
       
   425                 k = newj2len[j] = j2lenget(j-1, 0) + 1
       
   426                 if k > bestsize:
       
   427                     besti, bestj, bestsize = i-k+1, j-k+1, k
       
   428             j2len = newj2len
       
   429 
       
   430         # Extend the best by non-junk elements on each end.  In particular,
       
   431         # "popular" non-junk elements aren't in b2j, which greatly speeds
       
   432         # the inner loop above, but also means "the best" match so far
       
   433         # doesn't contain any junk *or* popular non-junk elements.
       
   434         while besti > alo and bestj > blo and \
       
   435               not isbjunk(b[bestj-1]) and \
       
   436               a[besti-1] == b[bestj-1]:
       
   437             besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
       
   438         while besti+bestsize < ahi and bestj+bestsize < bhi and \
       
   439               not isbjunk(b[bestj+bestsize]) and \
       
   440               a[besti+bestsize] == b[bestj+bestsize]:
       
   441             bestsize += 1
       
   442 
       
   443         # Now that we have a wholly interesting match (albeit possibly
       
   444         # empty!), we may as well suck up the matching junk on each
       
   445         # side of it too.  Can't think of a good reason not to, and it
       
   446         # saves post-processing the (possibly considerable) expense of
       
   447         # figuring out what to do with it.  In the case of an empty
       
   448         # interesting match, this is clearly the right thing to do,
       
   449         # because no other kind of match is possible in the regions.
       
   450         while besti > alo and bestj > blo and \
       
   451               isbjunk(b[bestj-1]) and \
       
   452               a[besti-1] == b[bestj-1]:
       
   453             besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
       
   454         while besti+bestsize < ahi and bestj+bestsize < bhi and \
       
   455               isbjunk(b[bestj+bestsize]) and \
       
   456               a[besti+bestsize] == b[bestj+bestsize]:
       
   457             bestsize = bestsize + 1
       
   458 
       
   459         return Match(besti, bestj, bestsize)
       
   460 
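    # Illustrative sketch of the CAUTION above (not in the original source):
    # stripping the common prefix of "ab" and "acab" would hide the real
    # longest block.  With no junk:
    #     >>> SequenceMatcher(None, "ab", "acab").find_longest_match(0, 2, 0, 4)
    #     Match(a=0, b=2, size=2)
    # i.e. all of "ab" matches b[2:4], so the resulting opcodes say "insert
    # 'ac' at the front" rather than "insert 'ca' in the middle".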
       
   461     def get_matching_blocks(self):
       
   462         """Return list of triples describing matching subsequences.
       
   463 
       
   464         Each triple is of the form (i, j, n), and means that
       
   465         a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
       
   466         i and in j.  New in Python 2.5, it's also guaranteed that if
       
   467         (i, j, n) and (i', j', n') are adjacent triples in the list, and
       
   468         the second is not the last triple in the list, then i+n != i' or
       
   469         j+n != j'.  IOW, adjacent triples never describe adjacent equal
       
   470         blocks.
       
   471 
       
   472         The last triple is a dummy, (len(a), len(b), 0), and is the only
       
   473         triple with n==0.
       
   474 
       
   475         >>> s = SequenceMatcher(None, "abxcd", "abcd")
       
   476         >>> s.get_matching_blocks()
       
   477         [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
       
   478         """
       
   479 
       
   480         if self.matching_blocks is not None:
       
   481             return self.matching_blocks
       
   482         la, lb = len(self.a), len(self.b)
       
   483 
       
   484         # This is most naturally expressed as a recursive algorithm, but
       
   485         # at least one user bumped into extreme use cases that exceeded
       
   486         # the recursion limit on their box.  So, now we maintain a list
       
    487         # (`queue`) of blocks we still need to look at, and append partial
       
   488         # results to `matching_blocks` in a loop; the matches are sorted
       
   489         # at the end.
       
   490         queue = [(0, la, 0, lb)]
       
   491         matching_blocks = []
       
   492         while queue:
       
   493             alo, ahi, blo, bhi = queue.pop()
       
   494             i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
       
   495             # a[alo:i] vs b[blo:j] unknown
       
   496             # a[i:i+k] same as b[j:j+k]
       
   497             # a[i+k:ahi] vs b[j+k:bhi] unknown
       
   498             if k:   # if k is 0, there was no matching block
       
   499                 matching_blocks.append(x)
       
   500                 if alo < i and blo < j:
       
   501                     queue.append((alo, i, blo, j))
       
   502                 if i+k < ahi and j+k < bhi:
       
   503                     queue.append((i+k, ahi, j+k, bhi))
       
   504         matching_blocks.sort()
       
   505 
       
   506         # It's possible that we have adjacent equal blocks in the
       
   507         # matching_blocks list now.  Starting with 2.5, this code was added
       
   508         # to collapse them.
       
   509         i1 = j1 = k1 = 0
       
   510         non_adjacent = []
       
   511         for i2, j2, k2 in matching_blocks:
       
   512             # Is this block adjacent to i1, j1, k1?
       
   513             if i1 + k1 == i2 and j1 + k1 == j2:
       
   514                 # Yes, so collapse them -- this just increases the length of
       
   515                 # the first block by the length of the second, and the first
       
   516                 # block so lengthened remains the block to compare against.
       
   517                 k1 += k2
       
   518             else:
       
   519                 # Not adjacent.  Remember the first block (k1==0 means it's
       
   520                 # the dummy we started with), and make the second block the
       
   521                 # new block to compare against.
       
   522                 if k1:
       
   523                     non_adjacent.append((i1, j1, k1))
       
   524                 i1, j1, k1 = i2, j2, k2
       
   525         if k1:
       
   526             non_adjacent.append((i1, j1, k1))
       
   527 
       
   528         non_adjacent.append( (la, lb, 0) )
       
   529         self.matching_blocks = non_adjacent
       
   530         return map(Match._make, self.matching_blocks)
       
   531 
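    # Illustrative trace of the queue (a sketch, not in the original source)
    # for the doctest above, a="abxcd", b="abcd":
    #     find_longest_match(0, 5, 0, 4) -> (0, 0, 2)      # the "ab" block
    #         only the right-hand region (2, 5, 2, 4) has elements on both
    #         sides, so only it is queued
    #     find_longest_match(2, 5, 2, 4) -> (3, 2, 2)      # the "cd" block
    #         neither remaining region has elements on both sides
    # leaving [(0, 0, 2), (3, 2, 2)] plus the (5, 4, 0) sentinel, as shown
    # in the doctest.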
       
   532     def get_opcodes(self):
       
   533         """Return list of 5-tuples describing how to turn a into b.
       
   534 
       
   535         Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
       
   536         has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
       
   537         tuple preceding it, and likewise for j1 == the previous j2.
       
   538 
       
   539         The tags are strings, with these meanings:
       
   540 
       
   541         'replace':  a[i1:i2] should be replaced by b[j1:j2]
       
   542         'delete':   a[i1:i2] should be deleted.
       
   543                     Note that j1==j2 in this case.
       
   544         'insert':   b[j1:j2] should be inserted at a[i1:i1].
       
   545                     Note that i1==i2 in this case.
       
   546         'equal':    a[i1:i2] == b[j1:j2]
       
   547 
       
   548         >>> a = "qabxcd"
       
   549         >>> b = "abycdf"
       
   550         >>> s = SequenceMatcher(None, a, b)
       
   551         >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
       
   552         ...    print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
       
   553         ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
       
   554          delete a[0:1] (q) b[0:0] ()
       
   555           equal a[1:3] (ab) b[0:2] (ab)
       
   556         replace a[3:4] (x) b[2:3] (y)
       
   557           equal a[4:6] (cd) b[3:5] (cd)
       
   558          insert a[6:6] () b[5:6] (f)
       
   559         """
       
   560 
       
   561         if self.opcodes is not None:
       
   562             return self.opcodes
       
   563         i = j = 0
       
   564         self.opcodes = answer = []
       
   565         for ai, bj, size in self.get_matching_blocks():
       
   566             # invariant:  we've pumped out correct diffs to change
       
   567             # a[:i] into b[:j], and the next matching block is
       
   568             # a[ai:ai+size] == b[bj:bj+size].  So we need to pump
       
   569             # out a diff to change a[i:ai] into b[j:bj], pump out
       
   570             # the matching block, and move (i,j) beyond the match
       
   571             tag = ''
       
   572             if i < ai and j < bj:
       
   573                 tag = 'replace'
       
   574             elif i < ai:
       
   575                 tag = 'delete'
       
   576             elif j < bj:
       
   577                 tag = 'insert'
       
   578             if tag:
       
   579                 answer.append( (tag, i, ai, j, bj) )
       
   580             i, j = ai+size, bj+size
       
   581             # the list of matching blocks is terminated by a
       
   582             # sentinel with size 0
       
   583             if size:
       
   584                 answer.append( ('equal', ai, i, bj, j) )
       
   585         return answer
       
   586 
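    # Worked link between the two views (a sketch, not in the original
    # source): for the docstring example a="qabxcd", b="abycdf",
    # get_matching_blocks() returns
    #     [Match(a=1, b=0, size=2), Match(a=4, b=3, size=2), Match(a=6, b=6, size=0)]
    # and the gaps before each block become 'delete' a[0:1], 'replace'
    # a[3:4]/b[2:3] and, at the zero-size sentinel, 'insert' b[5:6] --
    # exactly the opcodes listed in the doctest above.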
       
   587     def get_grouped_opcodes(self, n=3):
       
   588         """ Isolate change clusters by eliminating ranges with no changes.
       
   589 
       
    590         Return a generator of groups with up to n lines of context.
       
   591         Each group is in the same format as returned by get_opcodes().
       
   592 
       
   593         >>> from pprint import pprint
       
   594         >>> a = map(str, range(1,40))
       
   595         >>> b = a[:]
       
   596         >>> b[8:8] = ['i']     # Make an insertion
       
   597         >>> b[20] += 'x'       # Make a replacement
       
   598         >>> b[23:28] = []      # Make a deletion
       
   599         >>> b[30] += 'y'       # Make another replacement
       
   600         >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
       
   601         [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
       
   602          [('equal', 16, 19, 17, 20),
       
   603           ('replace', 19, 20, 20, 21),
       
   604           ('equal', 20, 22, 21, 23),
       
   605           ('delete', 22, 27, 23, 23),
       
   606           ('equal', 27, 30, 23, 26)],
       
   607          [('equal', 31, 34, 27, 30),
       
   608           ('replace', 34, 35, 30, 31),
       
   609           ('equal', 35, 38, 31, 34)]]
       
   610         """
       
   611 
       
   612         codes = self.get_opcodes()
       
   613         if not codes:
       
   614             codes = [("equal", 0, 1, 0, 1)]
       
   615         # Fixup leading and trailing groups if they show no changes.
       
   616         if codes[0][0] == 'equal':
       
   617             tag, i1, i2, j1, j2 = codes[0]
       
   618             codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
       
   619         if codes[-1][0] == 'equal':
       
   620             tag, i1, i2, j1, j2 = codes[-1]
       
   621             codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
       
   622 
       
   623         nn = n + n
       
   624         group = []
       
   625         for tag, i1, i2, j1, j2 in codes:
       
   626             # End the current group and start a new one whenever
       
   627             # there is a large range with no changes.
       
   628             if tag == 'equal' and i2-i1 > nn:
       
   629                 group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
       
   630                 yield group
       
   631                 group = []
       
   632                 i1, j1 = max(i1, i2-n), max(j1, j2-n)
       
   633             group.append((tag, i1, i2, j1 ,j2))
       
   634         if group and not (len(group)==1 and group[0][0] == 'equal'):
       
   635             yield group
       
   636 
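    # Illustrative note (a sketch, not in the original source): with the
    # default n=3, the leading ('equal', 0, 8, 0, 8) block in the doctest
    # above is trimmed to its last three lines via max(i1, i2-n), which is
    # why the first group starts with ('equal', 5, 8, 5, 8) rather than at
    # line 0.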
       
   637     def ratio(self):
       
   638         """Return a measure of the sequences' similarity (float in [0,1]).
       
   639 
       
   640         Where T is the total number of elements in both sequences, and
       
   641         M is the number of matches, this is 2.0*M / T.
       
   642         Note that this is 1 if the sequences are identical, and 0 if
       
   643         they have nothing in common.
       
   644 
       
   645         .ratio() is expensive to compute if you haven't already computed
       
   646         .get_matching_blocks() or .get_opcodes(), in which case you may
       
   647         want to try .quick_ratio() or .real_quick_ratio() first to get an
       
   648         upper bound.
       
   649 
       
   650         >>> s = SequenceMatcher(None, "abcd", "bcde")
       
   651         >>> s.ratio()
       
   652         0.75
       
   653         >>> s.quick_ratio()
       
   654         0.75
       
   655         >>> s.real_quick_ratio()
       
   656         1.0
       
   657         """
       
   658 
       
   659         matches = reduce(lambda sum, triple: sum + triple[-1],
       
   660                          self.get_matching_blocks(), 0)
       
   661         return _calculate_ratio(matches, len(self.a) + len(self.b))
       
   662 
       
   663     def quick_ratio(self):
       
   664         """Return an upper bound on ratio() relatively quickly.
       
   665 
       
   666         This isn't defined beyond that it is an upper bound on .ratio(), and
       
   667         is faster to compute.
       
   668         """
       
   669 
       
   670         # viewing a and b as multisets, set matches to the cardinality
       
   671         # of their intersection; this counts the number of matches
       
   672         # without regard to order, so is clearly an upper bound
       
   673         if self.fullbcount is None:
       
   674             self.fullbcount = fullbcount = {}
       
   675             for elt in self.b:
       
   676                 fullbcount[elt] = fullbcount.get(elt, 0) + 1
       
   677         fullbcount = self.fullbcount
       
   678         # avail[x] is the number of times x appears in 'b' less the
       
   679         # number of times we've seen it in 'a' so far ... kinda
       
   680         avail = {}
       
   681         availhas, matches = avail.__contains__, 0
       
   682         for elt in self.a:
       
   683             if availhas(elt):
       
   684                 numb = avail[elt]
       
   685             else:
       
   686                 numb = fullbcount.get(elt, 0)
       
   687             avail[elt] = numb - 1
       
   688             if numb > 0:
       
   689                 matches = matches + 1
       
   690         return _calculate_ratio(matches, len(self.a) + len(self.b))
       
   691 
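    # Illustrative bound (a sketch, not in the original source): the multiset
    # view ignores ordering, so for SequenceMatcher(None, "ab", "ba")
    #     quick_ratio() == real_quick_ratio() == 1.0   while   ratio() == 0.5
    # i.e. an upper bound that need not be tight.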
       
   692     def real_quick_ratio(self):
       
   693         """Return an upper bound on ratio() very quickly.
       
   694 
       
   695         This isn't defined beyond that it is an upper bound on .ratio(), and
       
   696         is faster to compute than either .ratio() or .quick_ratio().
       
   697         """
       
   698 
       
   699         la, lb = len(self.a), len(self.b)
       
   700         # can't have more matches than the number of elements in the
       
   701         # shorter sequence
       
   702         return _calculate_ratio(min(la, lb), la + lb)
       
   703 
       
   704 def get_close_matches(word, possibilities, n=3, cutoff=0.6):
       
   705     """Use SequenceMatcher to return list of the best "good enough" matches.
       
   706 
       
   707     word is a sequence for which close matches are desired (typically a
       
   708     string).
       
   709 
       
   710     possibilities is a list of sequences against which to match word
       
   711     (typically a list of strings).
       
   712 
       
   713     Optional arg n (default 3) is the maximum number of close matches to
       
   714     return.  n must be > 0.
       
   715 
       
   716     Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities
       
   717     that don't score at least that similar to word are ignored.
       
   718 
       
   719     The best (no more than n) matches among the possibilities are returned
       
   720     in a list, sorted by similarity score, most similar first.
       
   721 
       
   722     >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
       
   723     ['apple', 'ape']
       
   724     >>> import keyword as _keyword
       
   725     >>> get_close_matches("wheel", _keyword.kwlist)
       
   726     ['while']
       
   727     >>> get_close_matches("apple", _keyword.kwlist)
       
   728     []
       
   729     >>> get_close_matches("accept", _keyword.kwlist)
       
   730     ['except']
       
   731     """
       
   732 
       
    733     if not n > 0:
       
   734         raise ValueError("n must be > 0: %r" % (n,))
       
   735     if not 0.0 <= cutoff <= 1.0:
       
   736         raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
       
   737     result = []
       
   738     s = SequenceMatcher()
       
   739     s.set_seq2(word)
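    # seq2 gets the fixed word because SequenceMatcher caches its analysis
    # of the second sequence (see set_seq2/__chain_b above); each candidate
    # is then swapped in cheaply with set_seq1 in the loop below.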
       
   740     for x in possibilities:
       
   741         s.set_seq1(x)
       
   742         if s.real_quick_ratio() >= cutoff and \
       
   743            s.quick_ratio() >= cutoff and \
       
   744            s.ratio() >= cutoff:
       
   745             result.append((s.ratio(), x))
       
   746 
       
   747     # Move the best scorers to head of list
       
   748     result = heapq.nlargest(n, result)
       
   749     # Strip scores for the best n matches
       
   750     return [x for score, x in result]
       
   751 
       
   752 def _count_leading(line, ch):
       
   753     """
       
   754     Return number of `ch` characters at the start of `line`.
       
   755 
       
   756     Example:
       
   757 
       
   758     >>> _count_leading('   abc', ' ')
       
   759     3
       
   760     """
       
   761 
       
   762     i, n = 0, len(line)
       
   763     while i < n and line[i] == ch:
       
   764         i += 1
       
   765     return i
       
   766 
       
   767 class Differ:
       
   768     r"""
       
   769     Differ is a class for comparing sequences of lines of text, and
       
   770     producing human-readable differences or deltas.  Differ uses
       
   771     SequenceMatcher both to compare sequences of lines, and to compare
       
   772     sequences of characters within similar (near-matching) lines.
       
   773 
       
   774     Each line of a Differ delta begins with a two-letter code:
       
   775 
       
   776         '- '    line unique to sequence 1
       
   777         '+ '    line unique to sequence 2
       
   778         '  '    line common to both sequences
       
   779         '? '    line not present in either input sequence
       
   780 
       
   781     Lines beginning with '? ' attempt to guide the eye to intraline
       
   782     differences, and were not present in either input sequence.  These lines
       
   783     can be confusing if the sequences contain tab characters.
       
   784 
       
   785     Note that Differ makes no claim to produce a *minimal* diff.  To the
       
   786     contrary, minimal diffs are often counter-intuitive, because they synch
       
    787     up anywhere possible, sometimes producing accidental matches 100 pages apart.
       
   788     Restricting synch points to contiguous matches preserves some notion of
       
   789     locality, at the occasional cost of producing a longer diff.
       
   790 
       
   791     Example: Comparing two texts.
       
   792 
       
   793     First we set up the texts, sequences of individual single-line strings
       
   794     ending with newlines (such sequences can also be obtained from the
       
   795     `readlines()` method of file-like objects):
       
   796 
       
   797     >>> text1 = '''  1. Beautiful is better than ugly.
       
   798     ...   2. Explicit is better than implicit.
       
   799     ...   3. Simple is better than complex.
       
   800     ...   4. Complex is better than complicated.
       
   801     ... '''.splitlines(1)
       
   802     >>> len(text1)
       
   803     4
       
   804     >>> text1[0][-1]
       
   805     '\n'
       
   806     >>> text2 = '''  1. Beautiful is better than ugly.
       
   807     ...   3.   Simple is better than complex.
       
   808     ...   4. Complicated is better than complex.
       
   809     ...   5. Flat is better than nested.
       
   810     ... '''.splitlines(1)
       
   811 
       
   812     Next we instantiate a Differ object:
       
   813 
       
   814     >>> d = Differ()
       
   815 
       
   816     Note that when instantiating a Differ object we may pass functions to
       
   817     filter out line and character 'junk'.  See Differ.__init__ for details.
       
   818 
       
   819     Finally, we compare the two:
       
   820 
       
   821     >>> result = list(d.compare(text1, text2))
       
   822 
       
   823     'result' is a list of strings, so let's pretty-print it:
       
   824 
       
   825     >>> from pprint import pprint as _pprint
       
   826     >>> _pprint(result)
       
   827     ['    1. Beautiful is better than ugly.\n',
       
   828      '-   2. Explicit is better than implicit.\n',
       
   829      '-   3. Simple is better than complex.\n',
       
   830      '+   3.   Simple is better than complex.\n',
       
   831      '?     ++\n',
       
   832      '-   4. Complex is better than complicated.\n',
       
   833      '?            ^                     ---- ^\n',
       
   834      '+   4. Complicated is better than complex.\n',
       
   835      '?           ++++ ^                      ^\n',
       
   836      '+   5. Flat is better than nested.\n']
       
   837 
       
   838     As a single multi-line string it looks like this:
       
   839 
       
   840     >>> print ''.join(result),
       
   841         1. Beautiful is better than ugly.
       
   842     -   2. Explicit is better than implicit.
       
   843     -   3. Simple is better than complex.
       
   844     +   3.   Simple is better than complex.
       
   845     ?     ++
       
   846     -   4. Complex is better than complicated.
       
   847     ?            ^                     ---- ^
       
   848     +   4. Complicated is better than complex.
       
   849     ?           ++++ ^                      ^
       
   850     +   5. Flat is better than nested.
       
   851 
       
   852     Methods:
       
   853 
       
   854     __init__(linejunk=None, charjunk=None)
       
   855         Construct a text differencer, with optional filters.
       
   856 
       
   857     compare(a, b)
       
   858         Compare two sequences of lines; generate the resulting delta.
       
   859     """
       
   860 
       
   861     def __init__(self, linejunk=None, charjunk=None):
       
   862         """
       
   863         Construct a text differencer, with optional filters.
       
   864 
       
   865         The two optional keyword parameters are for filter functions:
       
   866 
       
   867         - `linejunk`: A function that should accept a single string argument,
       
   868           and return true iff the string is junk. The module-level function
       
   869           `IS_LINE_JUNK` may be used to filter out lines without visible
       
   870           characters, except for at most one splat ('#').  It is recommended
       
   871           to leave linejunk None; as of Python 2.3, the underlying
       
   872           SequenceMatcher class has grown an adaptive notion of "noise" lines
       
   873           that's better than any static definition the author has ever been
       
   874           able to craft.
       
   875 
       
   876         - `charjunk`: A function that should accept a string of length 1. The
       
   877           module-level function `IS_CHARACTER_JUNK` may be used to filter out
       
   878           whitespace characters (a blank or tab; **note**: bad idea to include
       
   879           newline in this!).  Use of IS_CHARACTER_JUNK is recommended.
       
   880         """
       
   881 
       
   882         self.linejunk = linejunk
       
   883         self.charjunk = charjunk
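        # Typical constructions (illustrative, not in the original source):
        #     Differ()                            # rely on the adaptive noise
        #                                         # detection described above
        #     Differ(charjunk=IS_CHARACTER_JUNK)  # the recommended charjunk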
       
   884 
       
   885     def compare(self, a, b):
       
   886         r"""
       
   887         Compare two sequences of lines; generate the resulting delta.
       
   888 
       
   889         Each sequence must contain individual single-line strings ending with
       
   890         newlines. Such sequences can be obtained from the `readlines()` method
       
   891         of file-like objects.  The delta generated also consists of newline-
       
    892         terminated strings, ready to be printed as-is via the writelines()
       
   893         method of a file-like object.
       
   894 
       
   895         Example:
       
   896 
       
   897         >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
       
   898         ...                                'ore\ntree\nemu\n'.splitlines(1))),
       
   899         - one
       
   900         ?  ^
       
   901         + ore
       
   902         ?  ^
       
   903         - two
       
   904         - three
       
   905         ?  -
       
   906         + tree
       
   907         + emu
       
   908         """
       
   909 
       
   910         cruncher = SequenceMatcher(self.linejunk, a, b)
       
   911         for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
       
   912             if tag == 'replace':
       
   913                 g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
       
   914             elif tag == 'delete':
       
   915                 g = self._dump('-', a, alo, ahi)
       
   916             elif tag == 'insert':
       
   917                 g = self._dump('+', b, blo, bhi)
       
   918             elif tag == 'equal':
       
   919                 g = self._dump(' ', a, alo, ahi)
       
   920             else:
       
   921                 raise ValueError, 'unknown tag %r' % (tag,)
       
   922 
       
   923             for line in g:
       
   924                 yield line
       
   925 
       
   926     def _dump(self, tag, x, lo, hi):
       
   927         """Generate comparison results for a same-tagged range."""
       
   928         for i in xrange(lo, hi):
       
   929             yield '%s %s' % (tag, x[i])
       
   930 
       
   931     def _plain_replace(self, a, alo, ahi, b, blo, bhi):
       
   932         assert alo < ahi and blo < bhi
       
   933         # dump the shorter block first -- reduces the burden on short-term
       
   934         # memory if the blocks are of very different sizes
       
   935         if bhi - blo < ahi - alo:
       
   936             first  = self._dump('+', b, blo, bhi)
       
   937             second = self._dump('-', a, alo, ahi)
       
   938         else:
       
   939             first  = self._dump('-', a, alo, ahi)
       
   940             second = self._dump('+', b, blo, bhi)
       
   941 
       
   942         for g in first, second:
       
   943             for line in g:
       
   944                 yield line
       
   945 
       
   946     def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
       
   947         r"""
       
   948         When replacing one block of lines with another, search the blocks
       
   949         for *similar* lines; the best-matching pair (if any) is used as a
       
   950         synch point, and intraline difference marking is done on the
       
   951         similar pair. Lots of work, but often worth it.
       
   952 
       
   953         Example:
       
   954 
       
   955         >>> d = Differ()
       
   956         >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
       
   957         ...                            ['abcdefGhijkl\n'], 0, 1)
       
   958         >>> print ''.join(results),
       
   959         - abcDefghiJkl
       
   960         ?    ^  ^  ^
       
   961         + abcdefGhijkl
       
   962         ?    ^  ^  ^
       
   963         """
       
   964 
       
   965         # don't synch up unless the lines have a similarity score of at
       
   966         # least cutoff; best_ratio tracks the best score seen so far
       
   967         best_ratio, cutoff = 0.74, 0.75
       
   968         cruncher = SequenceMatcher(self.charjunk)
       
   969         eqi, eqj = None, None   # 1st indices of equal lines (if any)
       
   970 
       
   971         # search for the pair that matches best without being identical
       
   972         # (identical lines must be junk lines, & we don't want to synch up
       
   973         # on junk -- unless we have to)
       
   974         for j in xrange(blo, bhi):
       
   975             bj = b[j]
       
   976             cruncher.set_seq2(bj)
       
   977             for i in xrange(alo, ahi):
       
   978                 ai = a[i]
       
   979                 if ai == bj:
       
   980                     if eqi is None:
       
   981                         eqi, eqj = i, j
       
   982                     continue
       
   983                 cruncher.set_seq1(ai)
       
   984                 # computing similarity is expensive, so use the quick
       
   985                 # upper bounds first -- have seen this speed up messy
       
   986                 # compares by a factor of 3.
       
   987                 # note that ratio() is only expensive to compute the first
       
   988                 # time it's called on a sequence pair; the expensive part
       
   989                 # of the computation is cached by cruncher
       
   990                 if cruncher.real_quick_ratio() > best_ratio and \
       
   991                       cruncher.quick_ratio() > best_ratio and \
       
   992                       cruncher.ratio() > best_ratio:
       
   993                     best_ratio, best_i, best_j = cruncher.ratio(), i, j
       
   994         if best_ratio < cutoff:
       
   995             # no non-identical "pretty close" pair
       
   996             if eqi is None:
       
   997                 # no identical pair either -- treat it as a straight replace
       
   998                 for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
       
   999                     yield line
       
  1000                 return
       
  1001             # no close pair, but an identical pair -- synch up on that
       
  1002             best_i, best_j, best_ratio = eqi, eqj, 1.0
       
  1003         else:
       
  1004             # there's a close pair, so forget the identical pair (if any)
       
  1005             eqi = None
       
  1006 
       
  1007         # a[best_i] very similar to b[best_j]; eqi is None iff they're not
       
  1008         # identical
       
  1009 
       
  1010         # pump out diffs from before the synch point
       
  1011         for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
       
  1012             yield line
       
  1013 
       
  1014         # do intraline marking on the synch pair
       
  1015         aelt, belt = a[best_i], b[best_j]
       
  1016         if eqi is None:
       
  1017             # pump out a '-', '?', '+', '?' quad for the synched lines
       
  1018             atags = btags = ""
       
  1019             cruncher.set_seqs(aelt, belt)
       
  1020             for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
       
  1021                 la, lb = ai2 - ai1, bj2 - bj1
       
  1022                 if tag == 'replace':
       
  1023                     atags += '^' * la
       
  1024                     btags += '^' * lb
       
  1025                 elif tag == 'delete':
       
  1026                     atags += '-' * la
       
  1027                 elif tag == 'insert':
       
  1028                     btags += '+' * lb
       
  1029                 elif tag == 'equal':
       
  1030                     atags += ' ' * la
       
  1031                     btags += ' ' * lb
       
  1032                 else:
       
  1033                     raise ValueError, 'unknown tag %r' % (tag,)
       
  1034             for line in self._qformat(aelt, belt, atags, btags):
       
  1035                 yield line
       
  1036         else:
       
  1037             # the synch pair is identical
       
  1038             yield '  ' + aelt
       
  1039 
       
  1040         # pump out diffs from after the synch point
       
  1041         for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
       
  1042             yield line
       
  1043 
       
  1044     def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
       
  1045         g = []
       
  1046         if alo < ahi:
       
  1047             if blo < bhi:
       
  1048                 g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
       
  1049             else:
       
  1050                 g = self._dump('-', a, alo, ahi)
       
  1051         elif blo < bhi:
       
  1052             g = self._dump('+', b, blo, bhi)
       
  1053 
       
  1054         for line in g:
       
  1055             yield line
       
  1056 
       
  1057     def _qformat(self, aline, bline, atags, btags):
       
  1058         r"""
       
  1059         Format "?" output and deal with leading tabs.
       
  1060 
       
  1061         Example:
       
  1062 
       
  1063         >>> d = Differ()
       
  1064         >>> results = d._qformat('\tabcDefghiJkl\n', '\t\tabcdefGhijkl\n',
       
  1065         ...                      '  ^ ^  ^      ', '+  ^ ^  ^      ')
       
  1066         >>> for line in results: print repr(line)
       
  1067         ...
       
  1068         '- \tabcDefghiJkl\n'
       
  1069         '? \t ^ ^  ^\n'
       
  1070         '+ \t\tabcdefGhijkl\n'
       
  1071         '? \t  ^ ^  ^\n'
       
  1072         """
       
  1073 
       
  1074         # Can hurt, but will probably help most of the time.
       
  1075         common = min(_count_leading(aline, "\t"),
       
  1076                      _count_leading(bline, "\t"))
       
  1077         common = min(common, _count_leading(atags[:common], " "))
       
  1078         atags = atags[common:].rstrip()
       
  1079         btags = btags[common:].rstrip()
       
  1080 
       
  1081         yield "- " + aline
       
  1082         if atags:
       
  1083             yield "? %s%s\n" % ("\t" * common, atags)
       
  1084 
       
  1085         yield "+ " + bline
       
  1086         if btags:
       
  1087             yield "? %s%s\n" % ("\t" * common, btags)
       
  1088 
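# --- Illustration only: not part of the original difflib module. ----------
# The _fancy_replace() method above rejects candidate line pairs using the
# cheap upper bounds real_quick_ratio() and quick_ratio() before paying for
# a full ratio() computation.  The hypothetical helper below sketches that
# same cascade directly on a SequenceMatcher; the sample strings and the
# 0.75 cutoff are arbitrary.

def _ratio_cascade_example(a='private Thread currentThread;',
                           b='private volatile Thread currentThread;',
                           cutoff=0.75):
    """Report which ratio bound first falls at or below `cutoff` (sketch only)."""
    cruncher = SequenceMatcher(None, a, b)
    # Each bound is >= the next one, so the cheap tests can reject early.
    if cruncher.real_quick_ratio() <= cutoff:
        return 'rejected by real_quick_ratio()'
    if cruncher.quick_ratio() <= cutoff:
        return 'rejected by quick_ratio()'
    if cruncher.ratio() <= cutoff:
        return 'rejected by ratio()'
    return 'accepted with ratio %.3f' % cruncher.ratio()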
       
  1089 # With respect to junk, an earlier version of ndiff simply refused to
       
  1090 # *start* a match with a junk element.  The result was cases like this:
       
  1091 #     before: private Thread currentThread;
       
  1092 #     after:  private volatile Thread currentThread;
       
  1093 # If you consider whitespace to be junk, the longest contiguous match
       
  1094 # not starting with junk is "e Thread currentThread".  So ndiff reported
       
  1095 # that "e volatil" was inserted between the 't' and the 'e' in "private".
       
  1096 # While an accurate view, to people that's absurd.  The current version
       
  1097 # looks for matching blocks that are entirely junk-free, then extends the
       
  1098 # longest one of those as far as possible but only with matching junk.
       
  1099 # So now "currentThread" is matched, then extended to suck up the
       
  1100 # preceding blank; then "private" is matched, and extended to suck up the
       
  1101 # following blank; then "Thread" is matched; and finally ndiff reports
       
  1102 # that "volatile " was inserted before "Thread".  The only quibble
       
  1103 # remaining is that perhaps it was really the case that " volatile"
       
  1104 # was inserted after "private".  I can live with that <wink>.
       
  1105 
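# --- Illustration only: not part of the original difflib module. ----------
# The behaviour described above can be reproduced at the character level
# with SequenceMatcher by treating blanks and tabs as junk: the junk-free
# pieces ("private", "Thread", "currentThread;") are matched first and then
# extended with the adjacent blanks, so the only difference reported is a
# single insertion of "volatile ".  The helper name is hypothetical.

def _junk_matching_example():
    """Return the character-level opcodes for the example above (sketch only)."""
    a = 'private Thread currentThread;'
    b = 'private volatile Thread currentThread;'
    cruncher = SequenceMatcher(lambda ch: ch in ' \t', a, b)
    # Expected shape (not asserted here): 'equal', then an 'insert' covering
    # b[8:17] == 'volatile ', then 'equal' blocks for the remainder.
    return cruncher.get_opcodes()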
       
  1106 import re
       
  1107 
       
  1108 def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
       
  1109     r"""
       
  1110     Return True for ignorable line: iff `line` is blank or contains a single '#'.
       
  1111 
       
  1112     Examples:
       
  1113 
       
  1114     >>> IS_LINE_JUNK('\n')
       
  1115     True
       
  1116     >>> IS_LINE_JUNK('  #   \n')
       
  1117     True
       
  1118     >>> IS_LINE_JUNK('hello\n')
       
  1119     False
       
  1120     """
       
  1121 
       
  1122     return pat(line) is not None
       
  1123 
       
  1124 def IS_CHARACTER_JUNK(ch, ws=" \t"):
       
  1125     r"""
       
  1126     Return True for ignorable character: iff `ch` is a space or tab.
       
  1127 
       
  1128     Examples:
       
  1129 
       
  1130     >>> IS_CHARACTER_JUNK(' ')
       
  1131     True
       
  1132     >>> IS_CHARACTER_JUNK('\t')
       
  1133     True
       
  1134     >>> IS_CHARACTER_JUNK('\n')
       
  1135     False
       
  1136     >>> IS_CHARACTER_JUNK('x')
       
  1137     False
       
  1138     """
       
  1139 
       
  1140     return ch in ws
       
  1141 
       
  1142 
       
  1143 def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
       
  1144                  tofiledate='', n=3, lineterm='\n'):
       
  1145     r"""
       
  1146     Compare two sequences of lines; generate the delta as a unified diff.
       
  1147 
       
  1148     Unified diffs are a compact way of showing line changes and a few
       
  1149     lines of context.  The number of context lines is set by 'n' which
       
  1150     defaults to three.
       
  1151 
       
  1152     By default, the diff control lines (those with ---, +++, or @@) are
       
  1153     created with a trailing newline.  This is helpful so that inputs
       
  1154     created from file.readlines() result in diffs that are suitable for
       
  1155     file.writelines() since both the inputs and outputs have trailing
       
  1156     newlines.
       
  1157 
       
  1158     For inputs that do not have trailing newlines, set the lineterm
       
  1159     argument to "" so that the output will be uniformly newline free.
       
  1160 
       
  1161     The unidiff format normally has a header for filenames and modification
       
  1162     times.  Any or all of these may be specified using strings for
       
  1163     'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.  The modification
       
  1164     times are normally expressed in the format returned by time.ctime().
       
  1165 
       
  1166     Example:
       
  1167 
       
  1168     >>> for line in unified_diff('one two three four'.split(),
       
  1169     ...             'zero one tree four'.split(), 'Original', 'Current',
       
  1170     ...             'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:20:52 2003',
       
  1171     ...             lineterm=''):
       
  1172     ...     print line
       
  1173     --- Original Sat Jan 26 23:30:50 1991
       
  1174     +++ Current Fri Jun 06 10:20:52 2003
       
  1175     @@ -1,4 +1,4 @@
       
  1176     +zero
       
  1177      one
       
  1178     -two
       
  1179     -three
       
  1180     +tree
       
  1181      four
       
  1182     """
       
  1183 
       
  1184     started = False
       
  1185     for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
       
  1186         if not started:
       
  1187             yield '--- %s %s%s' % (fromfile, fromfiledate, lineterm)
       
  1188             yield '+++ %s %s%s' % (tofile, tofiledate, lineterm)
       
  1189             started = True
       
  1190         i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
       
  1191         yield "@@ -%d,%d +%d,%d @@%s" % (i1+1, i2-i1, j1+1, j2-j1, lineterm)
       
  1192         for tag, i1, i2, j1, j2 in group:
       
  1193             if tag == 'equal':
       
  1194                 for line in a[i1:i2]:
       
  1195                     yield ' ' + line
       
  1196                 continue
       
  1197             if tag == 'replace' or tag == 'delete':
       
  1198                 for line in a[i1:i2]:
       
  1199                     yield '-' + line
       
  1200             if tag == 'replace' or tag == 'insert':
       
  1201                 for line in b[j1:j2]:
       
  1202                     yield '+' + line
       
  1203 
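# --- Illustration only: not part of the original difflib module. ----------
# Because the default lineterm keeps the trailing newlines produced by
# file.readlines(), the generator output can be handed straight to
# file.writelines().  The file names used by this hypothetical helper are
# arbitrary.

def _unified_diff_file_example(fromfile='before.txt', tofile='after.txt'):
    """Write a unified diff of two text files to stdout (sketch only)."""
    import sys
    fromlines = open(fromfile).readlines()
    tolines = open(tofile).readlines()
    sys.stdout.writelines(unified_diff(fromlines, tolines, fromfile, tofile,
                                       n=3))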
       
  1204 # See http://www.unix.org/single_unix_specification/
       
  1205 def context_diff(a, b, fromfile='', tofile='',
       
  1206                  fromfiledate='', tofiledate='', n=3, lineterm='\n'):
       
  1207     r"""
       
  1208     Compare two sequences of lines; generate the delta as a context diff.
       
  1209 
       
  1210     Context diffs are a compact way of showing line changes and a few
       
  1211     lines of context.  The number of context lines is set by 'n' which
       
  1212     defaults to three.
       
  1213 
       
  1214     By default, the diff control lines (those with *** or ---) are
       
  1215     created with a trailing newline.  This is helpful so that inputs
       
  1216     created from file.readlines() result in diffs that are suitable for
       
  1217     file.writelines() since both the inputs and outputs have trailing
       
  1218     newlines.
       
  1219 
       
  1220     For inputs that do not have trailing newlines, set the lineterm
       
  1221     argument to "" so that the output will be uniformly newline free.
       
  1222 
       
  1223     The context diff format normally has a header for filenames and
       
  1224     modification times.  Any or all of these may be specified using
       
  1225     strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
       
  1226     The modification times are normally expressed in the format returned
       
  1227     by time.ctime().  If not specified, the strings default to blanks.
       
  1228 
       
  1229     Example:
       
  1230 
       
  1231     >>> print ''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
       
  1232     ...       'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current',
       
  1233     ...       'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:22:46 2003')),
       
  1234     *** Original Sat Jan 26 23:30:50 1991
       
  1235     --- Current Fri Jun 06 10:22:46 2003
       
  1236     ***************
       
  1237     *** 1,4 ****
       
  1238       one
       
  1239     ! two
       
  1240     ! three
       
  1241       four
       
  1242     --- 1,4 ----
       
  1243     + zero
       
  1244       one
       
  1245     ! tree
       
  1246       four
       
  1247     """
       
  1248 
       
  1249     started = False
       
  1250     prefixmap = {'insert':'+ ', 'delete':'- ', 'replace':'! ', 'equal':'  '}
       
  1251     for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
       
  1252         if not started:
       
  1253             yield '*** %s %s%s' % (fromfile, fromfiledate, lineterm)
       
  1254             yield '--- %s %s%s' % (tofile, tofiledate, lineterm)
       
  1255             started = True
       
  1256 
       
  1257         yield '***************%s' % (lineterm,)
       
  1258         if group[-1][2] - group[0][1] >= 2:
       
  1259             yield '*** %d,%d ****%s' % (group[0][1]+1, group[-1][2], lineterm)
       
  1260         else:
       
  1261             yield '*** %d ****%s' % (group[-1][2], lineterm)
       
  1262         visiblechanges = [e for e in group if e[0] in ('replace', 'delete')]
       
  1263         if visiblechanges:
       
  1264             for tag, i1, i2, _, _ in group:
       
  1265                 if tag != 'insert':
       
  1266                     for line in a[i1:i2]:
       
  1267                         yield prefixmap[tag] + line
       
  1268 
       
  1269         if group[-1][4] - group[0][3] >= 2:
       
  1270             yield '--- %d,%d ----%s' % (group[0][3]+1, group[-1][4], lineterm)
       
  1271         else:
       
  1272             yield '--- %d ----%s' % (group[-1][4], lineterm)
       
  1273         visiblechanges = [e for e in group if e[0] in ('replace', 'insert')]
       
  1274         if visiblechanges:
       
  1275             for tag, _, _, j1, j2 in group:
       
  1276                 if tag != 'delete':
       
  1277                     for line in b[j1:j2]:
       
  1278                         yield prefixmap[tag] + line
       
  1279 
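# --- Illustration only: not part of the original difflib module. ----------
# For inputs without trailing newlines (plain word lists, for example), pass
# lineterm='' as described above so every generated line is newline free and
# can be printed or joined explicitly.  The helper name is hypothetical.

def _context_diff_no_newline_example():
    """Return a newline-free context diff of two word lists (sketch only)."""
    a = 'one two three four'.split()
    b = 'zero one tree four'.split()
    return list(context_diff(a, b, 'Original', 'Current', lineterm=''))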
       
  1280 def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
       
  1281     r"""
       
  1282     Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
       
  1283 
       
  1284     Optional keyword parameters `linejunk` and `charjunk` are for filter
       
  1285     functions (or None):
       
  1286 
       
  1287     - linejunk: A function that should accept a single string argument, and
       
  1288       return true iff the string is junk.  The default is None, and is
       
  1289       recommended; as of Python 2.3, an adaptive notion of "noise" lines is
       
  1290       used that does a good job on its own.
       
  1291 
       
  1292     - charjunk: A function that should accept a string of length 1. The
       
  1293       default is module-level function IS_CHARACTER_JUNK, which filters out
       
  1294       whitespace characters (a blank or tab; note: bad idea to include newline
       
  1295       in this!).
       
  1296 
       
  1297     Tools/scripts/ndiff.py is a command-line front-end to this function.
       
  1298 
       
  1299     Example:
       
  1300 
       
  1301     >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
       
  1302     ...              'ore\ntree\nemu\n'.splitlines(1))
       
  1303     >>> print ''.join(diff),
       
  1304     - one
       
  1305     ?  ^
       
  1306     + ore
       
  1307     ?  ^
       
  1308     - two
       
  1309     - three
       
  1310     ?  -
       
  1311     + tree
       
  1312     + emu
       
  1313     """
       
  1314     return Differ(linejunk, charjunk).compare(a, b)
       
  1315 
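# --- Illustration only: not part of the original difflib module. ----------
# The linejunk and charjunk filters documented above are passed straight
# through to Differ.  In this hypothetical helper the module-level
# IS_LINE_JUNK filter makes blank lines and '#'-only lines count as junk
# when lining up the two sequences.

def _ndiff_junk_filter_example():
    """Return an ndiff delta computed with explicit junk filters (sketch only)."""
    a = ['x = 1\n', '\n', '#\n', 'y = 2\n']
    b = ['x = 1\n', 'y = 3\n']
    return list(ndiff(a, b, linejunk=IS_LINE_JUNK, charjunk=IS_CHARACTER_JUNK))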
       
  1316 def _mdiff(fromlines, tolines, context=None, linejunk=None,
       
  1317            charjunk=IS_CHARACTER_JUNK):
       
  1318     r"""Returns generator yielding marked up from/to side by side differences.
       
  1319 
       
  1320     Arguments:
       
  1321     fromlines -- list of text lines to be compared to tolines
       
  1322     tolines -- list of text lines to be compared to fromlines
       
  1323     context -- number of context lines to display on each side of a difference;
       
  1324                if None, all from/to text lines will be generated.
       
  1325     linejunk -- passed on to ndiff (see ndiff documentation)
       
  1326     charjunk -- passed on to ndiff (see ndiff documentation)
       
  1327 
       
  1328     This function returns an iterator which yields a tuple:
       
  1329     (from line tuple, to line tuple, boolean flag)
       
  1330 
       
  1331     from/to line tuple -- (line num, line text)
       
  1332         line num -- integer or None (to indicate a context separation)
       
  1333         line text -- original line text with following markers inserted:
       
  1334             '\0+' -- marks start of added text
       
  1335             '\0-' -- marks start of deleted text
       
  1336             '\0^' -- marks start of changed text
       
  1337             '\1' -- marks end of added/deleted/changed text
       
  1338 
       
  1339     boolean flag -- None indicates context separation, True indicates
       
  1340         either "from" or "to" line contains a change, otherwise False.
       
  1341 
       
  1342     This function/iterator was originally developed to generate side by side
       
  1343     file differences for making HTML pages (see HtmlDiff class for example
       
  1344     usage).
       
  1345 
       
  1346     Note, this function utilizes the ndiff function to generate the side by
       
  1347     side difference markup.  Optional ndiff arguments may be passed to this
       
  1348     function and they in turn will be passed to ndiff.
       
  1349     """
       
  1350     import re
       
  1351 
       
  1352     # regular expression for finding intraline change indices
       
  1353     change_re = re.compile(r'(\++|\-+|\^+)')
       
  1354 
       
  1355     # create the difference iterator to generate the differences
       
  1356     diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
       
  1357 
       
  1358     def _make_line(lines, format_key, side, num_lines=[0,0]):
       
  1359         """Returns line of text with user's change markup and line formatting.
       
  1360 
       
  1361         lines -- list of lines from the ndiff generator to produce a line of
       
  1362                  text from.  When producing the line of text to return, the
       
  1363                  lines used are removed from this list.
       
  1364         format_key -- '+' return first line in list with "add" markup around
       
  1365                           the entire line.
       
  1366                       '-' return first line in list with "delete" markup around
       
  1367                           the entire line.
       
  1368                       '?' return first line in list with add/delete/change
       
  1369                           intraline markup (indices obtained from second line)
       
  1370                       None return first line in list with no markup
       
  1371         side -- index into the num_lines list (0=from,1=to)
       
  1372         num_lines -- from/to current line number.  This is NOT intended to be a
       
  1373                      passed parameter.  It is present as a keyword argument to
       
  1374                      maintain memory of the current line numbers between calls
       
  1375                      of this function.
       
  1376 
       
  1377         Note, this function is purposefully not defined at the module scope so
       
  1378         that data it needs from its parent function (within whose context it
       
  1379         is defined) does not need to be of module scope.
       
  1380         """
       
  1381         num_lines[side] += 1
       
  1382         # Handle case where no user markup is to be added, just return line of
       
  1383         # text with user's line format to allow for usage of the line number.
       
  1384         if format_key is None:
       
  1385             return (num_lines[side],lines.pop(0)[2:])
       
  1386         # Handle case of intraline changes
       
  1387         if format_key == '?':
       
  1388             text, markers = lines.pop(0), lines.pop(0)
       
  1389             # find intraline changes (store change type and indices in tuples)
       
  1390             sub_info = []
       
  1391             def record_sub_info(match_object,sub_info=sub_info):
       
  1392                 sub_info.append([match_object.group(1)[0],match_object.span()])
       
  1393                 return match_object.group(1)
       
  1394             change_re.sub(record_sub_info,markers)
       
  1395             # process each tuple inserting our special marks that won't be
       
  1396             # noticed by an xml/html escaper.
       
  1397             for key,(begin,end) in sub_info[::-1]:
       
  1398                 text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
       
  1399             text = text[2:]
       
  1400         # Handle case of add/delete entire line
       
  1401         else:
       
  1402             text = lines.pop(0)[2:]
       
  1403             # if line of text is just a newline, insert a space so there is
       
  1404             # something for the user to highlight and see.
       
  1405             if not text:
       
  1406                 text = ' '
       
  1407             # insert marks that won't be noticed by an xml/html escaper.
       
  1408             text = '\0' + format_key + text + '\1'
       
  1409         # Return line of text, first allow user's line formatter to do its
       
  1410         # thing (such as adding the line number) then replace the special
       
  1411         # marks with the user's change markup.
       
  1412         return (num_lines[side],text)
       
  1413 
       
  1414     def _line_iterator():
       
  1415         """Yields from/to lines of text with a change indication.
       
  1416 
       
  1417         This function is an iterator.  It itself pulls lines from a
       
  1418         differencing iterator, processes them and yields them.  When it can
       
  1419         it yields both a "from" and a "to" line, otherwise it will yield one
       
  1420         or the other.  In addition to yielding the lines of from/to text, a
       
  1421         boolean flag is yielded to indicate if the text line(s) have
       
  1422         differences in them.
       
  1423 
       
  1424         Note, this function is purposefully not defined at the module scope so
       
  1425         that data it needs from its parent function (within whose context it
       
  1426         is defined) does not need to be of module scope.
       
  1427         """
       
  1428         lines = []
       
  1429         num_blanks_pending, num_blanks_to_yield = 0, 0
       
  1430         while True:
       
  1431             # Load up next 4 lines so we can look ahead, create strings which
       
  1432             # are a concatenation of the first character of each of the 4 lines
       
  1433             # so we can do some very readable comparisons.
       
  1434             while len(lines) < 4:
       
  1435                 try:
       
  1436                     lines.append(diff_lines_iterator.next())
       
  1437                 except StopIteration:
       
  1438                     lines.append('X')
       
  1439             s = ''.join([line[0] for line in lines])
       
  1440             if s.startswith('X'):
       
  1441                 # When no more lines, pump out any remaining blank lines so the
       
  1442                 # corresponding add/delete lines get a matching blank line so
       
  1443                 # all line pairs get yielded at the next level.
       
  1444                 num_blanks_to_yield = num_blanks_pending
       
  1445             elif s.startswith('-?+?'):
       
  1446                 # simple intraline change
       
  1447                 yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
       
  1448                 continue
       
  1449             elif s.startswith('--++'):
       
  1450                 # in delete block, add block coming: we do NOT want to get
       
  1451                 # caught up on blank lines yet, just process the delete line
       
  1452                 num_blanks_pending -= 1
       
  1453                 yield _make_line(lines,'-',0), None, True
       
  1454                 continue
       
  1455             elif s.startswith(('--?+', '--+', '- ')):
       
  1456                 # in delete block and see an intraline change or unchanged line
       
  1457                 # coming: yield the delete line and then blanks
       
  1458                 from_line,to_line = _make_line(lines,'-',0), None
       
  1459                 num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
       
  1460             elif s.startswith('-+?'):
       
  1461                 # intraline change
       
  1462                 yield _make_line(lines,None,0), _make_line(lines,'?',1), True
       
  1463                 continue
       
  1464             elif s.startswith('-?+'):
       
  1465                 # intraline change
       
  1466                 yield _make_line(lines,'?',0), _make_line(lines,None,1), True
       
  1467                 continue
       
  1468             elif s.startswith('-'):
       
  1469                 # delete FROM line
       
  1470                 num_blanks_pending -= 1
       
  1471                 yield _make_line(lines,'-',0), None, True
       
  1472                 continue
       
  1473             elif s.startswith('+--'):
       
  1474                 # in add block, delete block coming: we do NOT want to get
       
  1475                 # caught up on blank lines yet, just process the add line
       
  1476                 num_blanks_pending += 1
       
  1477                 yield None, _make_line(lines,'+',1), True
       
  1478                 continue
       
  1479             elif s.startswith(('+ ', '+-')):
       
  1480                 # will be leaving an add block: yield blanks then add line
       
  1481                 from_line, to_line = None, _make_line(lines,'+',1)
       
  1482                 num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
       
  1483             elif s.startswith('+'):
       
  1484                 # inside an add block, yield the add line
       
  1485                 num_blanks_pending += 1
       
  1486                 yield None, _make_line(lines,'+',1), True
       
  1487                 continue
       
  1488             elif s.startswith(' '):
       
  1489                 # unchanged text, yield it to both sides
       
  1490                 yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
       
  1491                 continue
       
  1492             # Catch up on the blank lines so when we yield the next from/to
       
  1493             # pair, they are lined up.
       
  1494             while(num_blanks_to_yield < 0):
       
  1495                 num_blanks_to_yield += 1
       
  1496                 yield None,('','\n'),True
       
  1497             while(num_blanks_to_yield > 0):
       
  1498                 num_blanks_to_yield -= 1
       
  1499                 yield ('','\n'),None,True
       
  1500             if s.startswith('X'):
       
  1501                 raise StopIteration
       
  1502             else:
       
  1503                 yield from_line,to_line,True
       
  1504 
       
  1505     def _line_pair_iterator():
       
  1506         """Yields from/to lines of text with a change indication.
       
  1507 
       
  1508         This function is an iterator.  It itself pulls lines from the line
       
  1509         iterator.  Its difference from that iterator is that this function
       
  1510         always yields a pair of from/to text lines (with the change
       
  1511         indication).  If necessary it will collect single from/to lines
       
  1512         until it has a matching from/to pair to yield.
       
  1513 
       
  1514         Note, this function is purposefully not defined at the module scope so
       
  1515         that data it needs from its parent function (within whose context it
       
  1516         is defined) does not need to be of module scope.
       
  1517         """
       
  1518         line_iterator = _line_iterator()
       
  1519         fromlines,tolines=[],[]
       
  1520         while True:
       
  1521             # Collecting lines of text until we have a from/to pair
       
  1522             while (len(fromlines)==0 or len(tolines)==0):
       
  1523                 from_line, to_line, found_diff = line_iterator.next()
       
  1524                 if from_line is not None:
       
  1525                     fromlines.append((from_line,found_diff))
       
  1526                 if to_line is not None:
       
  1527                     tolines.append((to_line,found_diff))
       
  1528             # Once we have a pair, remove them from the collection and yield it
       
  1529             from_line, fromDiff = fromlines.pop(0)
       
  1530             to_line, to_diff = tolines.pop(0)
       
  1531             yield (from_line,to_line,fromDiff or to_diff)
       
  1532 
       
  1533     # Handle case where user does not want context differencing, just yield
       
  1534     # them up without doing anything else with them.
       
  1535     line_pair_iterator = _line_pair_iterator()
       
  1536     if context is None:
       
  1537         while True:
       
  1538             yield line_pair_iterator.next()
       
  1539     # Handle case where user wants context differencing.  We must do some
       
  1540     # storage of lines until we know for sure that they are to be yielded.
       
  1541     else:
       
  1542         context += 1
       
  1543         lines_to_write = 0
       
  1544         while True:
       
  1545             # Store lines up until we find a difference, note use of a
       
  1546             # circular queue because we only need to keep around what
       
  1547             # we need for context.
       
  1548             index, contextLines = 0, [None]*(context)
       
  1549             found_diff = False
       
  1550             while(found_diff is False):
       
  1551                 from_line, to_line, found_diff = line_pair_iterator.next()
       
  1552                 i = index % context
       
  1553                 contextLines[i] = (from_line, to_line, found_diff)
       
  1554                 index += 1
       
  1555             # Yield lines that we have collected so far, but first yield
       
  1556             # the user's separator.
       
  1557             if index > context:
       
  1558                 yield None, None, None
       
  1559                 lines_to_write = context
       
  1560             else:
       
  1561                 lines_to_write = index
       
  1562                 index = 0
       
  1563             while(lines_to_write):
       
  1564                 i = index % context
       
  1565                 index += 1
       
  1566                 yield contextLines[i]
       
  1567                 lines_to_write -= 1
       
  1568             # Now yield the context lines after the change
       
  1569             lines_to_write = context-1
       
  1570             while(lines_to_write):
       
  1571                 from_line, to_line, found_diff = line_pair_iterator.next()
       
  1572                 # If another change within the context, extend the context
       
  1573                 if found_diff:
       
  1574                     lines_to_write = context-1
       
  1575                 else:
       
  1576                     lines_to_write -= 1
       
  1577                 yield from_line, to_line, found_diff
       
  1578 
       
  1579 
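# --- Illustration only: not part of the original difflib module. ----------
# Each triple produced by _mdiff() carries the '\0+', '\0-', '\0^' and '\1'
# markers described in its docstring.  The hypothetical helper below strips
# those markers and collects plain (from_num, from_text, to_num, to_text,
# changed) rows; context-separator triples (None, None, None) are skipped.

def _mdiff_rows_example(fromlines, tolines, context=None):
    """Return plain side-by-side rows built from _mdiff() output (sketch only)."""
    rows = []
    for fromdata, todata, changed in _mdiff(fromlines, tolines, context):
        if changed is None:
            continue    # context separator (only produced when context is set)
        fromnum, fromtext = fromdata
        tonum, totext = todata
        for marker in ('\0+', '\0-', '\0^', '\1'):
            fromtext = fromtext.replace(marker, '')
            totext = totext.replace(marker, '')
        rows.append((fromnum, fromtext, tonum, totext, changed))
    return rows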
       
  1580 _file_template = """
       
  1581 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
       
  1582           "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
       
  1583 
       
  1584 <html>
       
  1585 
       
  1586 <head>
       
  1587     <meta http-equiv="Content-Type"
       
  1588           content="text/html; charset=ISO-8859-1" />
       
  1589     <title></title>
       
  1590     <style type="text/css">%(styles)s
       
  1591     </style>
       
  1592 </head>
       
  1593 
       
  1594 <body>
       
  1595     %(table)s%(legend)s
       
  1596 </body>
       
  1597 
       
  1598 </html>"""
       
  1599 
       
  1600 _styles = """
       
  1601         table.diff {font-family:Courier; border:medium;}
       
  1602         .diff_header {background-color:#e0e0e0}
       
  1603         td.diff_header {text-align:right}
       
  1604         .diff_next {background-color:#c0c0c0}
       
  1605         .diff_add {background-color:#aaffaa}
       
  1606         .diff_chg {background-color:#ffff77}
       
  1607         .diff_sub {background-color:#ffaaaa}"""
       
  1608 
       
  1609 _table_template = """
       
  1610     <table class="diff" id="difflib_chg_%(prefix)s_top"
       
  1611            cellspacing="0" cellpadding="0" rules="groups" >
       
  1612         <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
       
  1613         <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
       
  1614         %(header_row)s
       
  1615         <tbody>
       
  1616 %(data_rows)s        </tbody>
       
  1617     </table>"""
       
  1618 
       
  1619 _legend = """
       
  1620     <table class="diff" summary="Legends">
       
  1621         <tr> <th colspan="2"> Legends </th> </tr>
       
  1622         <tr> <td> <table border="" summary="Colors">
       
  1623                       <tr><th> Colors </th> </tr>
       
  1624                       <tr><td class="diff_add">&nbsp;Added&nbsp;</td></tr>
       
  1625                       <tr><td class="diff_chg">Changed</td> </tr>
       
  1626                       <tr><td class="diff_sub">Deleted</td> </tr>
       
  1627                   </table></td>
       
  1628              <td> <table border="" summary="Links">
       
  1629                       <tr><th colspan="2"> Links </th> </tr>
       
  1630                       <tr><td>(f)irst change</td> </tr>
       
  1631                       <tr><td>(n)ext change</td> </tr>
       
  1632                       <tr><td>(t)op</td> </tr>
       
  1633                   </table></td> </tr>
       
  1634     </table>"""
       
  1635 
       
  1636 class HtmlDiff(object):
       
  1637     """For producing HTML side by side comparison with change highlights.
       
  1638 
       
  1639     This class can be used to create an HTML table (or a complete HTML file
       
  1640     containing the table) showing a side by side, line by line comparison
       
  1641     of text with inter-line and intra-line change highlights.  The table can
       
  1642     be generated in either full or contextual difference mode.
       
  1643 
       
  1644     The following methods are provided for HTML generation:
       
  1645 
       
  1646     make_table -- generates HTML for a single side by side table
       
  1647     make_file -- generates complete HTML file with a single side by side table
       
  1648 
       
  1649     See Tools/scripts/diff.py for an example usage of this class.
       
  1650     """
       
  1651 
       
  1652     _file_template = _file_template
       
  1653     _styles = _styles
       
  1654     _table_template = _table_template
       
  1655     _legend = _legend
       
  1656     _default_prefix = 0
       
  1657 
       
  1658     def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
       
  1659                  charjunk=IS_CHARACTER_JUNK):
       
  1660         """HtmlDiff instance initializer
       
  1661 
       
  1662         Arguments:
       
  1663         tabsize -- tab stop spacing, defaults to 8.
       
  1664         wrapcolumn -- column number where lines are broken and wrapped,
       
  1665             defaults to None where lines are not wrapped.
       
  1666         linejunk,charjunk -- keyword arguments passed into ndiff() (used by
       
  1667             HtmlDiff() to generate the side by side HTML differences).  See
       
  1668             ndiff() documentation for argument default values and descriptions.
       
  1669         """
       
  1670         self._tabsize = tabsize
       
  1671         self._wrapcolumn = wrapcolumn
       
  1672         self._linejunk = linejunk
       
  1673         self._charjunk = charjunk
       
  1674 
       
  1675     def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
       
  1676                   numlines=5):
       
  1677         """Returns HTML file of side by side comparison with change highlights
       
  1678 
       
  1679         Arguments:
       
  1680         fromlines -- list of "from" lines
       
  1681         tolines -- list of "to" lines
       
  1682         fromdesc -- "from" file column header string
       
  1683         todesc -- "to" file column header string
       
  1684         context -- set to True for contextual differences (defaults to False
       
  1685             which shows full differences).
       
  1686         numlines -- number of context lines.  When context is set True,
       
  1687             controls number of lines displayed before and after the change.
       
  1688             When context is False, controls the number of lines to place
       
  1689             the "next" link anchors before the next change (so click of
       
  1690             "next" link jumps to just before the change).
       
  1691         """
       
  1692 
       
  1693         return self._file_template % dict(
       
  1694             styles = self._styles,
       
  1695             legend = self._legend,
       
  1696             table = self.make_table(fromlines,tolines,fromdesc,todesc,
       
  1697                                     context=context,numlines=numlines))
       
  1698 
       
  1699     def _tab_newline_replace(self,fromlines,tolines):
       
  1700         """Returns from/to line lists with tabs expanded and newlines removed.
       
  1701 
       
  1702         Instead of tab characters being replaced by the number of spaces
       
  1703         needed to fill in to the next tab stop, this function will fill
       
  1704         the space with tab characters.  This is done so that the difference
       
  1705         algorithms can identify changes in a file when tabs are replaced by
       
  1706         spaces and vice versa.  At the end of the HTML generation, the tab
       
  1707         characters will be replaced with a nonbreakable space.
       
  1708         """
       
  1709         def expand_tabs(line):
       
  1710             # hide real spaces
       
  1711             line = line.replace(' ','\0')
       
  1712             # expand tabs into spaces
       
  1713             line = line.expandtabs(self._tabsize)
       
  1714             # replace spaces from expanded tabs back into tab characters
       
  1715             # (we'll replace them with markup after we do differencing)
       
  1716             line = line.replace(' ','\t')
       
  1717             return line.replace('\0',' ').rstrip('\n')
       
  1718         fromlines = [expand_tabs(line) for line in fromlines]
       
  1719         tolines = [expand_tabs(line) for line in tolines]
       
  1720         return fromlines,tolines
       
  1721 
       
  1722     def _split_line(self,data_list,line_num,text):
       
  1723         """Builds list of text lines by splitting text lines at wrap point
       
  1724 
       
  1725         This function will determine if the input text line needs to be
       
  1726         wrapped (split) into separate lines.  If so, the first wrap point
       
  1727         will be determined and the first line appended to the output
       
  1728         text line list.  This function is used recursively to handle
       
  1729         the second part of the split line to further split it.
       
  1730         """
       
  1731         # if blank line or context separator, just add it to the output list
       
  1732         if not line_num:
       
  1733             data_list.append((line_num,text))
       
  1734             return
       
  1735 
       
  1736         # if line text doesn't need wrapping, just add it to the output list
       
  1737         size = len(text)
       
  1738         max = self._wrapcolumn
       
  1739         if (size <= max) or ((size -(text.count('\0')*3)) <= max):
       
  1740             data_list.append((line_num,text))
       
  1741             return
       
  1742 
       
  1743         # scan text looking for the wrap point, keeping track if the wrap
       
  1744         # point is inside markers
       
  1745         i = 0
       
  1746         n = 0
       
  1747         mark = ''
       
  1748         while n < max and i < size:
       
  1749             if text[i] == '\0':
       
  1750                 i += 1
       
  1751                 mark = text[i]
       
  1752                 i += 1
       
  1753             elif text[i] == '\1':
       
  1754                 i += 1
       
  1755                 mark = ''
       
  1756             else:
       
  1757                 i += 1
       
  1758                 n += 1
       
  1759 
       
  1760         # wrap point is inside text, break it up into separate lines
       
  1761         line1 = text[:i]
       
  1762         line2 = text[i:]
       
  1763 
       
  1764         # if wrap point is inside markers, place end marker at end of first
       
  1765         # line and start marker at beginning of second line because each
       
  1766         # line will have its own table tag markup around it.
       
  1767         if mark:
       
  1768             line1 = line1 + '\1'
       
  1769             line2 = '\0' + mark + line2
       
  1770 
       
  1771         # tack the first line onto the output list
       
  1772         data_list.append((line_num,line1))
       
  1773 
       
  1774         # use this routine again to wrap the remaining text
       
  1775         self._split_line(data_list,'>',line2)
       
  1776 
       
  1777     def _line_wrapper(self,diffs):
       
  1778         """Returns iterator that splits (wraps) mdiff text lines"""
       
  1779 
       
  1780         # pull from/to data and flags from mdiff iterator
       
  1781         for fromdata,todata,flag in diffs:
       
  1782             # check for context separators and pass them through
       
  1783             if flag is None:
       
  1784                 yield fromdata,todata,flag
       
  1785                 continue
       
  1786             (fromline,fromtext),(toline,totext) = fromdata,todata
       
  1787             # for each from/to line split it at the wrap column to form
       
  1788             # list of text lines.
       
  1789             fromlist,tolist = [],[]
       
  1790             self._split_line(fromlist,fromline,fromtext)
       
  1791             self._split_line(tolist,toline,totext)
       
  1792             # yield from/to line in pairs inserting blank lines as
       
  1793             # necessary when one side has more wrapped lines
       
  1794             while fromlist or tolist:
       
  1795                 if fromlist:
       
  1796                     fromdata = fromlist.pop(0)
       
  1797                 else:
       
  1798                     fromdata = ('',' ')
       
  1799                 if tolist:
       
  1800                     todata = tolist.pop(0)
       
  1801                 else:
       
  1802                     todata = ('',' ')
       
  1803                 yield fromdata,todata,flag
       
  1804 
       
  1805     def _collect_lines(self,diffs):
       
  1806         """Collects mdiff output into separate lists
       
  1807 
       
  1808         Before storing the mdiff from/to data into a list, it is converted
       
  1809         into a single line of text with HTML markup.
       
  1810         """
       
  1811 
       
  1812         fromlist,tolist,flaglist = [],[],[]
       
  1813         # pull from/to data and flags from mdiff style iterator
       
  1814         for fromdata,todata,flag in diffs:
       
  1815             try:
       
  1816                 # store HTML markup of the lines into the lists
       
  1817                 fromlist.append(self._format_line(0,flag,*fromdata))
       
  1818                 tolist.append(self._format_line(1,flag,*todata))
       
  1819             except TypeError:
       
  1820                 # exceptions occur for lines where context separators go
       
  1821                 fromlist.append(None)
       
  1822                 tolist.append(None)
       
  1823             flaglist.append(flag)
       
  1824         return fromlist,tolist,flaglist
       
  1825 
       
  1826     def _format_line(self,side,flag,linenum,text):
       
  1827         """Returns HTML markup of "from" / "to" text lines
       
  1828 
       
  1829         side -- 0 or 1 indicating "from" or "to" text
       
  1830         flag -- indicates if difference on line
       
  1831         linenum -- line number (used for line number column)
       
  1832         text -- line text to be marked up
       
  1833         """
       
  1834         try:
       
  1835             linenum = '%d' % linenum
       
  1836             id = ' id="%s%s"' % (self._prefix[side],linenum)
       
  1837         except TypeError:
       
  1838             # handle blank lines where linenum is '>' or ''
       
  1839             id = ''
       
  1840         # replace those things that would get confused with HTML symbols
       
  1841         text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
       
  1842 
       
  1843         # make spaces non-breakable so they don't get compressed or line wrapped
       
  1844         text = text.replace(' ','&nbsp;').rstrip()
       
  1845 
       
  1846         return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
       
  1847                % (id,linenum,text)
       
  1848 
       
  1849     def _make_prefix(self):
       
  1850         """Create unique anchor prefixes"""
       
  1851 
       
  1852         # Generate a unique anchor prefix so multiple tables
       
  1853         # can exist on the same HTML page without conflicts.
       
  1854         fromprefix = "from%d_" % HtmlDiff._default_prefix
       
  1855         toprefix = "to%d_" % HtmlDiff._default_prefix
       
  1856         HtmlDiff._default_prefix += 1
       
  1857         # store prefixes so line format method has access
       
  1858         self._prefix = [fromprefix,toprefix]
       
  1859 
       
  1860     def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
       
  1861         """Makes list of "next" links"""
       
  1862 
       
  1863         # all anchor names will be generated using the unique "to" prefix
       
  1864         toprefix = self._prefix[1]
       
  1865 
       
  1866         # process change flags, generating middle column of next anchors/links
       
  1867         next_id = ['']*len(flaglist)
       
  1868         next_href = ['']*len(flaglist)
       
  1869         num_chg, in_change = 0, False
       
  1870         last = 0
       
  1871         for i,flag in enumerate(flaglist):
       
  1872             if flag:
       
  1873                 if not in_change:
       
  1874                     in_change = True
       
  1875                     last = i
       
  1876                     # at the beginning of a change, drop an anchor a few lines
       
  1877                     # (the context lines) before the change for the previous
       
  1878                     # link
       
  1879                     i = max([0,i-numlines])
       
  1880                     next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
       
  1881                     # at the beginning of a change, drop a link to the next
       
  1882                     # change
       
  1883                     num_chg += 1
       
  1884                     next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
       
  1885                          toprefix,num_chg)
       
  1886             else:
       
  1887                 in_change = False
       
  1888         # check for cases where there is no content to avoid exceptions
       
  1889         if not flaglist:
       
  1890             flaglist = [False]
       
  1891             next_id = ['']
       
  1892             next_href = ['']
       
  1893             last = 0
       
  1894             if context:
       
  1895                 fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
       
  1896                 tolist = fromlist
       
  1897             else:
       
  1898                 fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
       
  1899         # if not a change on first line, drop a link
       
  1900         if not flaglist[0]:
       
  1901             next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
       
  1902         # redo the last link to link to the top
       
  1903         next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
       
  1904 
       
  1905         return fromlist,tolist,flaglist,next_href,next_id
       
  1906 
       
  1907     def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
       
  1908                    numlines=5):
       
  1909         """Returns HTML table of side by side comparison with change highlights
       
  1910 
       
  1911         Arguments:
       
  1912         fromlines -- list of "from" lines
       
  1913         tolines -- list of "to" lines
       
  1914         fromdesc -- "from" file column header string
       
  1915         todesc -- "to" file column header string
       
  1916         context -- set to True for contextual differences (defaults to False
       
  1917             which shows full differences).
       
  1918         numlines -- number of context lines.  When context is set True,
       
  1919             controls number of lines displayed before and after the change.
       
  1920             When context is False, controls the number of lines to place
       
  1921             the "next" link anchors before the next change (so click of
       
  1922             "next" link jumps to just before the change).
       
  1923         """
       
  1924 
       
  1925         # make unique anchor prefixes so that multiple tables may exist
       
  1926         # on the same page without conflict.
       
  1927         self._make_prefix()
       
  1928 
       
  1929         # change tabs to spaces before it gets more difficult after we insert
       
  1930         # markup
       
  1931         fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
       
  1932 
       
  1933         # create diffs iterator which generates side by side from/to data
       
  1934         if context:
       
  1935             context_lines = numlines
       
  1936         else:
       
  1937             context_lines = None
       
  1938         diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
       
  1939                       charjunk=self._charjunk)
       
  1940 
       
  1941         # set up iterator to wrap lines that exceed desired width
       
  1942         if self._wrapcolumn:
       
  1943             diffs = self._line_wrapper(diffs)
       
  1944 
       
  1945         # collect up from/to lines and flags into lists (also format the lines)
       
  1946         fromlist,tolist,flaglist = self._collect_lines(diffs)
       
  1947 
       
  1948         # process change flags, generating middle column of next anchors/links
       
  1949         fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
       
  1950             fromlist,tolist,flaglist,context,numlines)
       
  1951 
       
  1952         s = []
       
  1953         fmt = '            <tr><td class="diff_next"%s>%s</td>%s' + \
       
  1954               '<td class="diff_next">%s</td>%s</tr>\n'
       
  1955         for i in range(len(flaglist)):
       
  1956             if flaglist[i] is None:
       
  1957                 # mdiff yields None on separator lines; skip the bogus ones
       
  1958                 # generated for the first line
       
  1959                 if i > 0:
       
  1960                     s.append('        </tbody>        \n        <tbody>\n')
       
  1961             else:
       
  1962                 s.append( fmt % (next_id[i],next_href[i],fromlist[i],
       
  1963                                            next_href[i],tolist[i]))
       
  1964         if fromdesc or todesc:
       
  1965             header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
       
  1966                 '<th class="diff_next"><br /></th>',
       
  1967                 '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
       
  1968                 '<th class="diff_next"><br /></th>',
       
  1969                 '<th colspan="2" class="diff_header">%s</th>' % todesc)
       
  1970         else:
       
  1971             header_row = ''
       
  1972 
       
  1973         table = self._table_template % dict(
       
  1974             data_rows=''.join(s),
       
  1975             header_row=header_row,
       
  1976             prefix=self._prefix[1])
       
  1977 
       
  1978         return table.replace('\0+','<span class="diff_add">'). \
       
  1979                      replace('\0-','<span class="diff_sub">'). \
       
  1980                      replace('\0^','<span class="diff_chg">'). \
       
  1981                      replace('\1','</span>'). \
       
  1982                      replace('\t','&nbsp;')
       
  1983 
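# --- Illustration only: not part of the original difflib module. ----------
# make_file() returns a complete HTML page while make_table() returns only
# the comparison table.  The file names and the tabsize/wrapcolumn values in
# this hypothetical helper are arbitrary.

def _html_diff_file_example(fromfile='before.txt', tofile='after.txt',
                            outfile='diff.html'):
    """Write an HtmlDiff side-by-side page for two text files (sketch only)."""
    fromlines = open(fromfile).readlines()
    tolines = open(tofile).readlines()
    page = HtmlDiff(tabsize=4, wrapcolumn=72).make_file(
        fromlines, tolines, fromfile, tofile, context=True, numlines=3)
    open(outfile, 'w').write(page)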
       
  1984 del re
       
  1985 
       
  1986 def restore(delta, which):
       
  1987     r"""
       
  1988     Generate one of the two sequences that generated a delta.
       
  1989 
       
  1990     Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
       
  1991     lines originating from file 1 or 2 (parameter `which`), stripping off line
       
  1992     prefixes.
       
  1993 
       
  1994     Examples:
       
  1995 
       
  1996     >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
       
  1997     ...              'ore\ntree\nemu\n'.splitlines(1))
       
  1998     >>> diff = list(diff)
       
  1999     >>> print ''.join(restore(diff, 1)),
       
  2000     one
       
  2001     two
       
  2002     three
       
  2003     >>> print ''.join(restore(diff, 2)),
       
  2004     ore
       
  2005     tree
       
  2006     emu
       
  2007     """
       
  2008     try:
       
  2009         tag = {1: "- ", 2: "+ "}[int(which)]
       
  2010     except KeyError:
       
  2011         raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
       
  2012                            % which)
       
  2013     prefixes = ("  ", tag)
       
  2014     for line in delta:
       
  2015         if line[:2] in prefixes:
       
  2016             yield line[2:]
       
  2017 
       
  2018 def _test():
       
  2019     import doctest, difflib
       
  2020     return doctest.testmod(difflib)
       
  2021 
       
  2022 if __name__ == "__main__":
       
  2023     _test()