cpython: a349448474ea
--- a/Lib/HTMLParser.py
+++ b/Lib/HTMLParser.py
@@ -23,6 +23,9 @@ starttagopen = re.compile('<[a-zA-Z]')
 piclose = re.compile('>')
 commentclose = re.compile(r'--\s*>')
 tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
+# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
+# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
+tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
 
 attrfind = re.compile(
     r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
@@ -243,7 +246,7 @@ class HTMLParser(markupbase.ParserBase):
     # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
     def parse_bogus_comment(self, i, report=1):
         rawdata = self.rawdata
-        if rawdata[i:i+2] != '<!':
+        if rawdata[i:i+2] not in ('<!', '</'):
             self.error('unexpected call to parse_comment()')
         pos = rawdata.find('>', i+2)
         if pos == -1:
@@ -353,23 +356,38 @@ class HTMLParser(markupbase.ParserBase):
         match = endendtag.search(rawdata, i+1) # >
         if not match:
             return -1
-        j = match.end()
+        gtpos = match.end()
         match = endtagfind.match(rawdata, i) # </ + tag + >
         if not match:
             if self.cdata_elem is not None:
-                self.handle_data(rawdata[i:j])
-                return j
-            self.error("bad end tag: %r" % (rawdata[i:j],))
+                self.handle_data(rawdata[i:gtpos])
+                return gtpos
+            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
+            namematch = tagfind_tolerant.match(rawdata, i+2)
+            if not namematch:
+                # w3.org/TR/html5/tokenization.html#end-tag-open-state
+                if rawdata[i:i+3] == '</>':
+                    return i+3
+                else:
+                    return self.parse_bogus_comment(i)
+            tagname = namematch.group().lower()
+            # consume and ignore other stuff between the name and the >
+            # Note: this is not 100% correct, since we might have things like
+            # </tag attr=">">, but looking for > after the name should cover
+            # most of the cases and is much simpler
+            gtpos = rawdata.find('>', namematch.end())
+            self.handle_endtag(tagname)
+            return gtpos+1
 
         elem = match.group(1).lower() # script or style
         if self.cdata_elem is not None:
             if elem != self.cdata_elem:
-                self.handle_data(rawdata[i:j])
-                return j
+                self.handle_data(rawdata[i:gtpos])
+                return gtpos
 
         self.handle_endtag(elem)
         self.clear_cdata_mode()
-        return j
+        return gtpos
 
     # Overridable -- finish processing of start+end tag: <tag.../>
     def handle_startendtag(self, tag, attrs):
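
To make the recovery path concrete, here is a minimal standalone sketch (not part of the changeset) that reuses the tagfind_tolerant pattern and mirrors the branch added to parse_endtag() above. The name recover_end_tag is invented for illustration; in the real code the same results are reported through handle_endtag() and parse_bogus_comment() instead of being returned:

    import re

    # same pattern as the tagfind_tolerant added above
    tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')

    def recover_end_tag(rawdata, i):
        # Assumes rawdata[i:i+2] == '</' and that a '>' follows somewhere,
        # which parse_endtag() has already verified at this point.
        namematch = tagfind_tolerant.match(rawdata, i + 2)
        if not namematch:
            if rawdata[i:i+3] == '</>':
                return None, i + 3          # '</>' produces no event at all
            gtpos = rawdata.find('>', i + 2)
            # everything up to the '>' becomes a bogus comment
            return ('comment', rawdata[i+2:gtpos]), gtpos + 1
        tagname = namematch.group().lower()
        # everything between the name and the next '>' is discarded
        gtpos = rawdata.find('>', namematch.end())
        return ('endtag', tagname), gtpos + 1

    print(recover_end_tag('</label</p>', 0))         # (('endtag', 'label<'), 11)
    print(recover_end_tag('</li class="unit">', 0))  # (('endtag', 'li'), 18)
    print(recover_end_tag('</$>', 0))                # (('comment', '$'), 4)
    print(recover_end_tag('</>', 0))                 # (None, 3)

Note how '<' is a legal name character for the tolerant pattern, which is why '</label</p>' yields the end tag 'label<' rather than 'label', exactly as the new test below expects.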
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -202,12 +202,12 @@ text
         self._run_check(["", ""], output)
 
     def test_starttag_junk_chars(self):
-        self._parse_error("</>")
-        self._parse_error("</$>")
+        self._run_check("</>", [])
+        self._run_check("</$>", [('comment', '$')])
         self._parse_error("</")
         self._parse_error("</a")
         self._parse_error("<a<a>")
-        self._parse_error("</a<a>")
+        self._run_check("</a<a>", [('endtag', 'a<a')])
         self._parse_error("<!")
         self._parse_error("<a")
         self._parse_error("<a foo='bar'")
@@ -232,6 +232,44 @@ text
             ("endtag", "p"),
         ])
 
+    def test_invalid_end_tags(self):
+        # A collection of broken end tags. <br> is used as separator.
+        # see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state
+        # and #13993
+        html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>'
+                '</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>')
+        expected = [('starttag', 'br', []),
+                    # < is part of the name, / is discarded, p is an attribute
+                    ('endtag', 'label<'),
+                    ('starttag', 'br', []),
+                    # text and attributes are discarded
+                    ('endtag', 'div'),
+                    ('starttag', 'br', []),
+                    # comment because the first char after </ is not a-zA-Z
+                    ('comment', '<h4'),
+                    ('starttag', 'br', []),
+                    # attributes are discarded
+                    ('endtag', 'li'),
+                    ('starttag', 'br', []),
+                    # everything till ul (included) is discarded
+                    ('endtag', 'li'),
+                    ('starttag', 'br', []),
+                    # </> is ignored
+                    ('starttag', 'br', [])]
+        self._run_check(html, expected)
+
+    def test_broken_invalid_end_tag(self):
+        # This is technically wrong (the "> shouldn't be included in 'data'),
+        # but it is probably not worth fixing: in addition to all the cases
+        # in the previous test, it would require full attribute parsing.
+        # see #13993
+        html = '<b>This</b attr=">"> confuses the parser'
+        expected = [('starttag', 'b', []),
+                    ('data', 'This'),
+                    ('endtag', 'b'),
+                    ('data', '"> confuses the parser')]
+        self._run_check(html, expected)
+
     def test_get_starttag_text(self):
         s = """<foo:bar   \n   one="1"\ttwo=2   >"""
         self._run_check_extra(s, [
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -90,6 +90,8 @@ Core and Builtins
 Library
 -------
 
+- Issue #13993: HTMLParser is now able to handle broken end tags.
+
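
As an end-to-end illustration of the fix (a sketch, not part of the changeset; the event-printing subclass is invented for demonstration), feeding one of the broken fragments from the tests no longer raises HTMLParseError:

    from HTMLParser import HTMLParser

    class EventPrinter(HTMLParser):
        def handle_starttag(self, tag, attrs):
            print('starttag %s %s' % (tag, attrs))
        def handle_endtag(self, tag):
            print('endtag %s' % tag)
        def handle_comment(self, data):
            print('comment %s' % data)

    parser = EventPrinter()
    parser.feed('<br></label</p><br></$><br></>')
    parser.close()
    # Output with this patch applied:
    #   starttag br []
    #   endtag label<     ('<' joins the name, '/p' is discarded)
    #   starttag br []
    #   comment $         ('</' followed by a non-letter: bogus comment)
    #   starttag br []
    #                     ('</>' emits nothing)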