grayfriday @ 2b87b0e786d9ce5961b3c7f74df8a87ff1b01838

blackfriday fork with a few changes

simplify naming of parsing functions
Russ Ross <russ@russross.com>
Tue, 05 Jul 2011 14:22:21 -0600
commit   2b87b0e786d9ce5961b3c7f74df8a87ff1b01838
parent   fb435fe2e3d19e3a99c1e125feb2ce9b395f96c6

4 files changed, 175 insertions(+), 177 deletions(-)
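The rename follows one rule throughout: drop the parse/block/inline prefixes from *Parser methods and inline handlers, since the receiver, the file, and the callback table already carry that context. A minimal standalone sketch of the idea, using a toy Parser type rather than anything from this repository:

package main

import (
    "bytes"
    "fmt"
)

// Toy Parser, not the blackfriday one.
type Parser struct{}

// Old naming: the "parse" prefix repeats what the receiver already says.
func (p *Parser) parseBlock(out *bytes.Buffer, data []byte) { out.Write(data) }

// New naming after this commit: parser.block(out, data) carries the same
// information with less repetition.
func (p *Parser) block(out *bytes.Buffer, data []byte) { out.Write(data) }

func main() {
    var p Parser
    var out bytes.Buffer
    p.parseBlock(&out, []byte("old: parser.parseBlock\n"))
    p.block(&out, []byte("new: parser.block\n"))
    fmt.Print(out.String())
}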

M block.go

@@ -20,9 +20,9 @@

 // Parse block-level data.
 // Note: this function and many that it calls assume that
 // the input buffer ends with a newline.
-func (parser *Parser) parseBlock(out *bytes.Buffer, data []byte) {
+func (parser *Parser) block(out *bytes.Buffer, data []byte) {
     if len(data) == 0 || data[len(data)-1] != '\n' {
-        panic("parseBlock input is missing terminating newline")
+        panic("block input is missing terminating newline")
     }

     // this is called recursively: enforce a maximum depth

@@ -40,7 +40,7 @@ // ## Header 2

 // ...
 // ###### Header 6
 if parser.isPrefixHeader(data) {
-    data = data[parser.blockPrefixHeader(out, data):]
+    data = data[parser.prefixHeader(out, data):]
     continue
 }

@@ -50,7 +50,7 @@ // <div>

 // ...
 // </div>
 if data[0] == '<' {
-    if i := parser.blockHtml(out, data, true); i > 0 {
+    if i := parser.html(out, data, true); i > 0 {
         data = data[i:]
         continue
     }

@@ -70,8 +70,8 @@ // return a

 // }
 // return b
 // }
-if parser.blockCodePrefix(data) > 0 {
-    data = data[parser.blockCode(out, data):]
+if parser.codePrefix(data) > 0 {
+    data = data[parser.code(out, data):]
     continue
 }

@@ -86,7 +86,7 @@ // return n * fact(n-1)

 // }
 // ```
 if parser.flags&EXTENSION_FENCED_CODE != 0 {
-    if i := parser.blockFencedCode(out, data); i > 0 {
+    if i := parser.fencedCode(out, data); i > 0 {
         data = data[i:]
         continue
     }

@@ -112,8 +112,8 @@ // block quote:

 //
 // > A big quote I found somewhere
 // > on the web
-if parser.blockQuotePrefix(data) > 0 {
-    data = data[parser.blockQuote(out, data):]
+if parser.quotePrefix(data) > 0 {
+    data = data[parser.quote(out, data):]
     continue
 }

@@ -124,7 +124,7 @@ // ------|-----|---------

 // Bob | 31 | 555-1234
 // Alice | 27 | 555-4321
 if parser.flags&EXTENSION_TABLES != 0 {
-    if i := parser.blockTable(out, data); i > 0 {
+    if i := parser.table(out, data); i > 0 {
         data = data[i:]
         continue
     }

@@ -136,8 +136,8 @@ // * Item 1

 // * Item 2
 //
 // also works with + or -
-if parser.blockUliPrefix(data) > 0 {
-    data = data[parser.blockList(out, data, 0):]
+if parser.uliPrefix(data) > 0 {
+    data = data[parser.list(out, data, 0):]
     continue
 }

@@ -145,14 +145,14 @@ // a numbered/ordered list:

     //
     // 1. Item 1
     // 2. Item 2
-    if parser.blockOliPrefix(data) > 0 {
-        data = data[parser.blockList(out, data, LIST_TYPE_ORDERED):]
+    if parser.oliPrefix(data) > 0 {
+        data = data[parser.list(out, data, LIST_TYPE_ORDERED):]
         continue
     }

     // anything else must look like a normal paragraph
     // note: this finds underlined headers, too
-    data = data[parser.blockParagraph(out, data):]
+    data = data[parser.paragraph(out, data):]
 }

 parser.nesting--

@@ -175,7 +175,7 @@ }

     return true
 }

-func (parser *Parser) blockPrefixHeader(out *bytes.Buffer, data []byte) int {
+func (parser *Parser) prefixHeader(out *bytes.Buffer, data []byte) int {
     level := 0
     for level < 6 && data[level] == '#' {
         level++

@@ -194,7 +194,7 @@ end--

 }
 if end > i {
     work := func() bool {
-        parser.parseInline(out, data[i:end])
+        parser.inline(out, data[i:end])
         return true
     }
     parser.r.Header(out, work, level)

@@ -238,24 +238,24 @@

     return 0
 }

-func (parser *Parser) blockHtml(out *bytes.Buffer, data []byte, doRender bool) int {
+func (parser *Parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
     var i, j int

     // identify the opening tag
     if data[0] != '<' {
         return 0
     }
-    curtag, tagfound := parser.blockHtmlFindTag(data[1:])
+    curtag, tagfound := parser.htmlFindTag(data[1:])

     // handle special cases
     if !tagfound {
         // check for an HTML comment
-        if size := parser.blockHtmlComment(out, data, doRender); size > 0 {
+        if size := parser.htmlComment(out, data, doRender); size > 0 {
             return size
         }

         // check for an <hr> tag
-        if size := parser.blockHtmlHr(out, data, doRender); size > 0 {
+        if size := parser.htmlHr(out, data, doRender); size > 0 {
             return size
         }

@@ -309,7 +309,7 @@ if i+2+len(curtag) >= len(data) {

         break
     }
-    j = parser.blockHtmlFindEnd(curtag, data[i-1:])
+    j = parser.htmlFindEnd(curtag, data[i-1:])
     if j > 0 {
         i += j - 1

@@ -337,7 +337,7 @@ return i

 }

 // HTML comment, lax form
-func (parser *Parser) blockHtmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+func (parser *Parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
     if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
         return 0
     }

@@ -373,7 +373,7 @@ return 0

 }

 // HR, which is the only self-closing block tag considered
-func (parser *Parser) blockHtmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+func (parser *Parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
     if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
         return 0
     }

@@ -406,7 +406,7 @@

     return 0
 }

-func (parser *Parser) blockHtmlFindTag(data []byte) (string, bool) {
+func (parser *Parser) htmlFindTag(data []byte) (string, bool) {
     i := 0
     for isalnum(data[i]) {
         i++

@@ -418,7 +418,7 @@ }

     return "", false
 }

-func (parser *Parser) blockHtmlFindEnd(tag string, data []byte) int {
+func (parser *Parser) htmlFindEnd(tag string, data []byte) int {
     // assume data[0] == '<' && data[1] == '/' already tested

     // check if tag is a match

@@ -584,7 +584,7 @@ skip = i + 1

     return
 }

-func (parser *Parser) blockFencedCode(out *bytes.Buffer, data []byte) int {
+func (parser *Parser) fencedCode(out *bytes.Buffer, data []byte) int {
     var lang *string
     beg, marker := parser.isFencedCode(data, &lang, "")
     if beg == 0 || beg >= len(data) {

@@ -630,9 +630,9 @@

     return beg
 }

-func (parser *Parser) blockTable(out *bytes.Buffer, data []byte) int {
+func (parser *Parser) table(out *bytes.Buffer, data []byte) int {
     var header bytes.Buffer
-    i, columns := parser.blockTableHeader(&header, data)
+    i, columns := parser.tableHeader(&header, data)
     if i == 0 {
         return 0
     }

@@ -652,9 +652,9 @@ i = rowStart

         break
     }

-    // include the newline in data sent to blockTableRow
+    // include the newline in data sent to tableRow
     i++
-    parser.blockTableRow(&body, data[rowStart:i], columns)
+    parser.tableRow(&body, data[rowStart:i], columns)
 }

 parser.r.Table(out, header.Bytes(), body.Bytes(), columns)

@@ -662,7 +662,7 @@

     return i
 }

-func (parser *Parser) blockTableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+func (parser *Parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
     i := 0
     colCount := 1
     for i = 0; data[i] != '\n'; i++ {

@@ -676,7 +676,7 @@ if colCount == 1 {

         return
     }

-    // include the newline in the data sent to blockTableRow
+    // include the newline in the data sent to tableRow
     header := data[:i+1]

     // column count ignores pipes at beginning or end of line

@@ -757,12 +757,12 @@ if col != colCount {

         return
     }

-    parser.blockTableRow(out, header, columns)
+    parser.tableRow(out, header, columns)
     size = i + 1
     return
 }

-func (parser *Parser) blockTableRow(out *bytes.Buffer, data []byte, columns []int) {
+func (parser *Parser) tableRow(out *bytes.Buffer, data []byte, columns []int) {
     i, col := 0, 0
     var rowWork bytes.Buffer

@@ -789,7 +789,7 @@ cellEnd--

     }

     var cellWork bytes.Buffer
-    parser.parseInline(&cellWork, data[cellStart:cellEnd])
+    parser.inline(&cellWork, data[cellStart:cellEnd])
     parser.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
 }

@@ -804,7 +804,7 @@ parser.r.TableRow(out, rowWork.Bytes())

 }

 // returns blockquote prefix length
-func (parser *Parser) blockQuotePrefix(data []byte) int {
+func (parser *Parser) quotePrefix(data []byte) int {
     i := 0
     for i < 3 && data[i] == ' ' {
         i++

@@ -819,7 +819,7 @@ return 0

 }

 // parse a blockquote fragment
-func (parser *Parser) blockQuote(out *bytes.Buffer, data []byte) int {
+func (parser *Parser) quote(out *bytes.Buffer, data []byte) int {
     var raw bytes.Buffer
     beg, end := 0, 0
     for beg < len(data) {

@@ -829,17 +829,15 @@ end++

     }
     end++
-    if pre := parser.blockQuotePrefix(data[beg:]); pre > 0 {
+    if pre := parser.quotePrefix(data[beg:]); pre > 0 {
         // string the prefix
         beg += pre
-    } else {
+    } else if parser.isEmpty(data[beg:]) > 0 &&
+        (end >= len(data) ||
+            (parser.quotePrefix(data[end:]) == 0 && parser.isEmpty(data[end:]) == 0)) {
         // blockquote ends with at least one blank line
         // followed by something without a blockquote prefix
-        if parser.isEmpty(data[beg:]) > 0 &&
-            (end >= len(data) ||
-                (parser.blockQuotePrefix(data[end:]) == 0 && parser.isEmpty(data[end:]) == 0)) {
-            break
-        }
+        break
     }

     // this line is part of the blockquote

@@ -848,20 +846,20 @@ beg = end

     }

     var cooked bytes.Buffer
-    parser.parseBlock(&cooked, raw.Bytes())
+    parser.block(&cooked, raw.Bytes())
     parser.r.BlockQuote(out, cooked.Bytes())
     return end
 }

 // returns prefix length for block code
-func (parser *Parser) blockCodePrefix(data []byte) int {
+func (parser *Parser) codePrefix(data []byte) int {
     if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
         return 4
     }
     return 0
 }

-func (parser *Parser) blockCode(out *bytes.Buffer, data []byte) int {
+func (parser *Parser) code(out *bytes.Buffer, data []byte) int {
     var work bytes.Buffer
     i := 0

@@ -873,14 +871,12 @@ }

     i++
     blankline := parser.isEmpty(data[beg:i]) > 0
-    if pre := parser.blockCodePrefix(data[beg:i]); pre > 0 {
+    if pre := parser.codePrefix(data[beg:i]); pre > 0 {
         beg += pre
-    } else {
-        if !blankline {
-            // non-empty, non-prefixed line breaks the pre
-            i = beg
-            break
-        }
+    } else if !blankline {
+        // non-empty, non-prefixed line breaks the pre
+        i = beg
+        break
     }

     // verbatim copy to the working buffeu

@@ -909,7 +905,7 @@ return i

 }

 // returns unordered list item prefix
-func (parser *Parser) blockUliPrefix(data []byte) int {
+func (parser *Parser) uliPrefix(data []byte) int {
     i := 0

     // start with up to 3 spaces

@@ -926,7 +922,7 @@ return i + 2

 }

 // returns ordered list item prefix
-func (parser *Parser) blockOliPrefix(data []byte) int {
+func (parser *Parser) oliPrefix(data []byte) int {
     i := 0

     // start with up to 3 spaces

@@ -948,12 +944,12 @@ return i + 2

 }

 // parse ordered or unordered list block
-func (parser *Parser) blockList(out *bytes.Buffer, data []byte, flags int) int {
+func (parser *Parser) list(out *bytes.Buffer, data []byte, flags int) int {
     i := 0
     flags |= LIST_ITEM_BEGINNING_OF_LIST
     work := func() bool {
         for i < len(data) {
-            skip := parser.blockListItem(out, data[i:], &flags)
+            skip := parser.listItem(out, data[i:], &flags)
             i += skip
             if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {

@@ -970,16 +966,16 @@ }

 // Parse a single list item.
 // Assumes initial prefix is already removed if this is a sublist.
-func (parser *Parser) blockListItem(out *bytes.Buffer, data []byte, flags *int) int {
+func (parser *Parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
     // keep track of the indentation of the first line
     itemIndent := 0
     for itemIndent < 3 && data[itemIndent] == ' ' {
         itemIndent++
     }

-    i := parser.blockUliPrefix(data)
+    i := parser.uliPrefix(data)
     if i == 0 {
-        i = parser.blockOliPrefix(data)
+        i = parser.oliPrefix(data)
     }
     if i == 0 {
         return 0

@@ -1035,8 +1031,8 @@

 // evaluate how this line fits in
 switch {
 // is this a nested list item?
-case (parser.blockUliPrefix(chunk) > 0 && !parser.isHRule(chunk)) ||
-    parser.blockOliPrefix(chunk) > 0:
+case (parser.uliPrefix(chunk) > 0 && !parser.isHRule(chunk)) ||
+    parser.oliPrefix(chunk) > 0:

     if containsBlankLine {
         *flags |= LIST_ITEM_CONTAINS_BLOCK

@@ -1089,18 +1085,18 @@ var cooked bytes.Buffer

 if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 {
     // intermediate render of block li
     if sublist > 0 {
-        parser.parseBlock(&cooked, rawBytes[:sublist])
-        parser.parseBlock(&cooked, rawBytes[sublist:])
+        parser.block(&cooked, rawBytes[:sublist])
+        parser.block(&cooked, rawBytes[sublist:])
     } else {
-        parser.parseBlock(&cooked, rawBytes)
+        parser.block(&cooked, rawBytes)
     }
 } else {
     // intermediate render of inline li
     if sublist > 0 {
-        parser.parseInline(&cooked, rawBytes[:sublist])
-        parser.parseBlock(&cooked, rawBytes[sublist:])
+        parser.inline(&cooked, rawBytes[:sublist])
+        parser.block(&cooked, rawBytes[sublist:])
     } else {
-        parser.parseInline(&cooked, rawBytes)
+        parser.inline(&cooked, rawBytes)
     }
 }

@@ -1138,13 +1134,13 @@ end--

     }

     work := func() bool {
-        parser.parseInline(out, data[beg:end])
+        parser.inline(out, data[beg:end])
         return true
     }
     parser.r.Paragraph(out, work)
 }

-func (parser *Parser) blockParagraph(out *bytes.Buffer, data []byte) int {
+func (parser *Parser) paragraph(out *bytes.Buffer, data []byte) int {
     // prev: index of 1st char of previous line
     // line: index of 1st char of current line
     // i: index of cursor/end of current line

@@ -1182,7 +1178,7 @@ // render the header

 // this ugly double closure avoids forcing variables onto the heap
 work := func(o *bytes.Buffer, p *Parser, d []byte) func() bool {
     return func() bool {
-        p.parseInline(o, d)
+        p.inline(o, d)
         return true
     }
 }(out, parser, data[prev:eol])

@@ -1198,7 +1194,7 @@ }

 // if the next line starts a block of HTML, then the paragraph ends here
 if parser.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
-    if data[i] == '<' && parser.blockHtml(out, current, false) > 0 {
+    if data[i] == '<' && parser.html(out, current, false) > 0 {
         // rewind to before the HTML block
         parser.renderParagraph(out, data[:i])
         return i
M inline.go

@@ -22,7 +22,7 @@ // Each function returns the number of chars taken care of

 // data is the complete block being rendered
 // offset is the number of valid chars before the current cursor
-func (parser *Parser) parseInline(out *bytes.Buffer, data []byte) {
+func (parser *Parser) inline(out *bytes.Buffer, data []byte) {
     // this is called recursively: enforce a maximum depth
     if parser.nesting >= parser.maxNesting {
         return

@@ -32,7 +32,7 @@

     i, end := 0, 0
     for i < len(data) {
         // copy inactive chars into the output
-        for end < len(data) && parser.inline[data[end]] == nil {
+        for end < len(data) && parser.inlineCallback[data[end]] == nil {
             end++
         }

@@ -44,7 +44,7 @@ }

     i = end

     // call the trigger
-    handler := parser.inline[data[end]]
+    handler := parser.inlineCallback[data[end]]
     if consumed := handler(parser, out, data, i); consumed == 0 {
         // no action from the callback; buffer the byte for later
         end = i + 1

@@ -59,7 +59,7 @@ parser.nesting--

 }

 // single and double emphasis parsing
-func inlineEmphasis(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func emphasis(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     data = data[offset:]
     c := data[0]
     ret := 0

@@ -70,7 +70,7 @@ // strikethrough only takes two characters '~~'

     if c == '~' || isspace(data[1]) {
         return 0
     }
-    if ret = inlineHelperEmph1(parser, out, data[1:], c); ret == 0 {
+    if ret = helperEmphasis(parser, out, data[1:], c); ret == 0 {
         return 0
     }

@@ -81,7 +81,7 @@ if len(data) > 3 && data[1] == c && data[2] != c {

     if isspace(data[2]) {
         return 0
     }
-    if ret = inlineHelperEmph2(parser, out, data[2:], c); ret == 0 {
+    if ret = helperDoubleEmphasis(parser, out, data[2:], c); ret == 0 {
         return 0
     }

@@ -92,7 +92,7 @@ if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {

     if c == '~' || isspace(data[3]) {
         return 0
     }
-    if ret = inlineHelperEmph3(parser, out, data, 3, c); ret == 0 {
+    if ret = helperTripleEmphasis(parser, out, data, 3, c); ret == 0 {
         return 0
     }

@@ -102,7 +102,7 @@

     return 0
 }

-func inlineCodeSpan(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func codeSpan(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     data = data[offset:]
     nb := 0

@@ -149,7 +149,7 @@ }

 // newline preceded by two spaces becomes <br>
 // newline without two spaces works when EXTENSION_HARD_LINE_BREAK is enabled
-func inlineLineBreak(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func lineBreak(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     // remove trailing spaces from out
     outBytes := out.Bytes()
     end := len(outBytes)

@@ -169,7 +169,7 @@ return 1

 }

 // '[': parse a link or an image
-func inlineLink(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func link(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     // no links allowed inside other links
     if parser.insideLink {
         return 0

@@ -229,13 +229,16 @@

 linkB := i

 // look for link end: ' " )
+findlinkend:
 for i < len(data) {
-    if data[i] == '\\' {
+    switch {
+    case data[i] == '\\':
         i += 2
-    } else {
-        if data[i] == ')' || data[i] == '\'' || data[i] == '"' {
-            break
-        }
+
+    case data[i] == ')' || data[i] == '\'' || data[i] == '"':
+        break findlinkend
+
+    default:
         i++
     }
 }

@@ -251,13 +254,16 @@ if data[i] == '\'' || data[i] == '"' {

 i++
 titleB = i

+findtitleend:
 for i < len(data) {
-    if data[i] == '\\' {
+    switch {
+    case data[i] == '\\':
         i += 2
-    } else {
-        if data[i] == ')' {
-            break
-        }
+
+    case data[i] == ')':
+        break findtitleend
+
+    default:
         i++
     }
 }

@@ -398,7 +404,7 @@ } else {

     // links cannot contain other links, so turn off link parsing temporarily
     insideLink := parser.insideLink
     parser.insideLink = true
-    parser.parseInline(&content, data[1:txtE])
+    parser.inline(&content, data[1:txtE])
     parser.insideLink = insideLink
 }
 }

@@ -432,7 +438,7 @@ return i

 }

 // '<' when tags or autolinks are allowed
-func inlineLAngle(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func leftAngle(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     data = data[offset:]
     altype := LINK_TYPE_NOT_AUTOLINK
     end := tagLength(data, &altype)

@@ -455,7 +461,7 @@

 // '\\' backslash escape
 var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>")

-func inlineEscape(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func escape(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     data = data[offset:]
     if len(data) > 1 {

@@ -492,7 +498,7 @@ }

 // '&' escaped when it doesn't belong to an entity
 // valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
-func inlineEntity(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func entity(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     data = data[offset:]
     end := 1

@@ -516,7 +522,7 @@

     return end
 }

-func inlineAutoLink(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
+func autoLink(parser *Parser, out *bytes.Buffer, data []byte, offset int) int {
     // quick check to rule out most false hits on ':'
     if parser.insideLink || len(data) < offset+3 || data[offset+1] != '/' || data[offset+2] != '/' {
         return 0

@@ -689,12 +695,10 @@

 for i < len(data) {
     if data[i] == '\\' {
         i += 2
+    } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
+        break
     } else {
-        if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
-            break
-        } else {
-            i++
-        }
+        i++
     }
 }

@@ -753,7 +757,7 @@ return 0

 }

 // look for the next emph char, skipping other constructs
-func inlineHelperFindEmphChar(data []byte, c byte) int {
+func helperFindEmphChar(data []byte, c byte) int {
     i := 1

     for i < len(data) {

@@ -787,50 +791,48 @@ if i >= len(data) {

             return tmpI
         }
         i++
-    } else {
-        if data[i] == '[' {
-            // skip a link
-            tmpI := 0
-            i++
-            for i < len(data) && data[i] != ']' {
-                if tmpI == 0 && data[i] == c {
-                    tmpI = i
-                }
-                i++
+    } else if data[i] == '[' {
+        // skip a link
+        tmpI := 0
+        i++
+        for i < len(data) && data[i] != ']' {
+            if tmpI == 0 && data[i] == c {
+                tmpI = i
             }
             i++
-            for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
-                i++
-            }
-            if i >= len(data) {
-                return tmpI
-            }
-            if data[i] != '[' && data[i] != '(' { // not a link
-                if tmpI > 0 {
-                    return tmpI
-                } else {
-                    continue
-                }
-            }
-            cc := data[i]
+        }
+        i++
+        for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
             i++
-            for i < len(data) && data[i] != cc {
-                if tmpI == 0 && data[i] == c {
-                    tmpI = i
-                }
-                i++
-            }
-            if i >= len(data) {
+        }
+        if i >= len(data) {
+            return tmpI
+        }
+        if data[i] != '[' && data[i] != '(' { // not a link
+            if tmpI > 0 {
                 return tmpI
+            } else {
+                continue
+            }
+        }
+        cc := data[i]
+        i++
+        for i < len(data) && data[i] != cc {
+            if tmpI == 0 && data[i] == c {
+                tmpI = i
             }
             i++
         }
+        if i >= len(data) {
+            return tmpI
+        }
+        i++
     }
 }
 return 0
 }

-func inlineHelperEmph1(parser *Parser, out *bytes.Buffer, data []byte, c byte) int {
+func helperEmphasis(parser *Parser, out *bytes.Buffer, data []byte, c byte) int {
     i := 0

     // skip one symbol if coming from emph3

@@ -839,7 +841,7 @@ i = 1

 }

 for i < len(data) {
-    length := inlineHelperFindEmphChar(data[i:], c)
+    length := helperFindEmphChar(data[i:], c)
     if length == 0 {
         return 0
     }

@@ -862,7 +864,7 @@ }

     }

     var work bytes.Buffer
-    parser.parseInline(&work, data[:i])
+    parser.inline(&work, data[:i])
     parser.r.Emphasis(out, work.Bytes())
     return i + 1
 }

@@ -871,11 +873,11 @@

     return 0
 }

-func inlineHelperEmph2(parser *Parser, out *bytes.Buffer, data []byte, c byte) int {
+func helperDoubleEmphasis(parser *Parser, out *bytes.Buffer, data []byte, c byte) int {
     i := 0

     for i < len(data) {
-        length := inlineHelperFindEmphChar(data[i:], c)
+        length := helperFindEmphChar(data[i:], c)
         if length == 0 {
             return 0
         }

@@ -883,7 +885,7 @@ i += length

 if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
     var work bytes.Buffer
-    parser.parseInline(&work, data[:i])
+    parser.inline(&work, data[:i])

     if work.Len() > 0 {
         // pick the right renderer

@@ -900,13 +902,13 @@ }

     return 0
 }

-func inlineHelperEmph3(parser *Parser, out *bytes.Buffer, data []byte, offset int, c byte) int {
+func helperTripleEmphasis(parser *Parser, out *bytes.Buffer, data []byte, offset int, c byte) int {
     i := 0
     origData := data
     data = data[offset:]

     for i < len(data) {
-        length := inlineHelperFindEmphChar(data[i:], c)
+        length := helperFindEmphChar(data[i:], c)
         if length == 0 {
             return 0
         }

@@ -922,14 +924,14 @@ case i+2 < len(data) && data[i+1] == c && data[i+2] == c:

     // triple symbol found
     var work bytes.Buffer
-    parser.parseInline(&work, data[:i])
+    parser.inline(&work, data[:i])
     if work.Len() > 0 {
         parser.r.TripleEmphasis(out, work.Bytes())
     }
     return i + 3
 case (i+1 < len(data) && data[i+1] == c):
     // double symbol found, hand over to emph1
-    length = inlineHelperEmph1(parser, out, origData[offset-2:], c)
+    length = helperEmphasis(parser, out, origData[offset-2:], c)
     if length == 0 {
         return 0
     } else {

@@ -937,7 +939,7 @@ return length - 2

     }
 default:
     // single symbol found, hand over to emph2
-    length = inlineHelperEmph2(parser, out, origData[offset-1:], c)
+    length = helperDoubleEmphasis(parser, out, origData[offset-1:], c)
     if length == 0 {
         return 0
     } else {
M markdown.go

@@ -143,13 +143,13 @@ // The main parser object.

 // This is constructed by the Markdown function and
 // contains state used during the parsing process.
 type Parser struct {
-    r          Renderer
-    refs       map[string]*reference
-    inline     [256]inlineParser
-    flags      int
-    nesting    int
-    maxNesting int
-    insideLink bool
+    r              Renderer
+    refs           map[string]*reference
+    inlineCallback [256]inlineParser
+    flags          int
+    nesting        int
+    maxNesting     int
+    insideLink     bool
 }

@@ -211,20 +211,20 @@ parser.maxNesting = 16

 parser.insideLink = false

 // register inline parsers
-parser.inline['*'] = inlineEmphasis
-parser.inline['_'] = inlineEmphasis
+parser.inlineCallback['*'] = emphasis
+parser.inlineCallback['_'] = emphasis
 if extensions&EXTENSION_STRIKETHROUGH != 0 {
-    parser.inline['~'] = inlineEmphasis
+    parser.inlineCallback['~'] = emphasis
 }
-parser.inline['`'] = inlineCodeSpan
-parser.inline['\n'] = inlineLineBreak
-parser.inline['['] = inlineLink
-parser.inline['<'] = inlineLAngle
-parser.inline['\\'] = inlineEscape
-parser.inline['&'] = inlineEntity
+parser.inlineCallback['`'] = codeSpan
+parser.inlineCallback['\n'] = lineBreak
+parser.inlineCallback['['] = link
+parser.inlineCallback['<'] = leftAngle
+parser.inlineCallback['\\'] = escape
+parser.inlineCallback['&'] = entity
 if extensions&EXTENSION_AUTOLINK != 0 {
-    parser.inline[':'] = inlineAutoLink
+    parser.inlineCallback[':'] = autoLink
 }

 first := firstPass(parser, input)
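An aside on the table these registrations fill: inline parsing dispatches through a 256-entry array indexed by the byte that triggers a handler, which is why the renamed inlineCallback field reads better than the old inline. The following standalone sketch shows that dispatch pattern with toy types and a toy handler; none of it is blackfriday code, and the real callbacks also take the *Parser.

package main

import (
    "bytes"
    "fmt"
)

// handler loosely mirrors the shape of an inline callback; this is a toy type.
type handler func(out *bytes.Buffer, data []byte, offset int) int

var callbacks [256]handler

// emphasis is a stand-in handler: it emits a marker and consumes one byte.
func emphasis(out *bytes.Buffer, data []byte, offset int) int {
    out.WriteString("<em>")
    return 1
}

func main() {
    // Register a handler by its trigger byte, in the spirit of
    // parser.inlineCallback['*'] = emphasis above.
    callbacks['*'] = emphasis

    data := []byte("plain *text*")
    var out bytes.Buffer
    for i := 0; i < len(data); i++ {
        if h := callbacks[data[i]]; h != nil {
            i += h(&out, data, i) - 1
            continue
        }
        out.WriteByte(data[i])
    }
    fmt.Println(out.String()) // prints: plain <em>text<em>
}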

@@ -284,7 +284,7 @@ func secondPass(parser *Parser, input []byte) []byte {

     var output bytes.Buffer

     parser.r.DocumentHeader(&output)
-    parser.parseBlock(&output, input)
+    parser.block(&output, input)
     parser.r.DocumentFooter(&output)

     if parser.nesting != 0 {
M smartypants.go

@@ -39,7 +39,7 @@ func isdigit(c byte) bool {

     return c >= '0' && c <= '9'
 }

-func smartQuotesHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool) bool {
+func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool) bool {
     // edge of the buffer is likely to be a tag that we don't get to see,
     // so we treat it like text sometimes

@@ -107,7 +107,7 @@ out.WriteString("quo;")

     return true
 }

-func smartSquote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
     if len(text) >= 2 {
         t1 := tolower(text[1])

@@ -116,7 +116,7 @@ nextChar := byte(0)

     if len(text) >= 3 {
         nextChar = text[2]
     }
-    if smartQuotesHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
+    if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
         return 1
     }
 }

@@ -141,7 +141,7 @@ nextChar := byte(0)

     if len(text) > 1 {
         nextChar = text[1]
     }
-    if smartQuotesHelper(out, previousChar, nextChar, 's', &smrt.inSingleQuote) {
+    if smartQuoteHelper(out, previousChar, nextChar, 's', &smrt.inSingleQuote) {
         return 0
     }

@@ -211,7 +211,7 @@ nextChar := byte(0)

     if len(text) >= 7 {
         nextChar = text[6]
     }
-    if smartQuotesHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
+    if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
         return 5
     }
 }

@@ -245,7 +245,7 @@ nextChar := byte(0)

     if len(text) >= 3 {
         nextChar = text[2]
     }
-    if smartQuotesHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
+    if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
         return 1
     }
 }

@@ -318,19 +318,19 @@ out.WriteByte(text[0])

     return 0
 }

-func smartDquote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func smartDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
     nextChar := byte(0)
     if len(text) > 1 {
         nextChar = text[1]
     }
-    if !smartQuotesHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
+    if !smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
         out.WriteString("&quot;")
     }
     return 0
 }

-func smartLtag(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
     i := 0
     for i < len(text) && text[i] != '>' {

@@ -347,9 +347,9 @@ type SmartypantsRenderer [256]smartCallback

 func Smartypants(flags int) *SmartypantsRenderer {
     r := new(SmartypantsRenderer)
-    r['"'] = smartDquote
+    r['"'] = smartDoubleQuote
     r['&'] = smartAmp
-    r['\''] = smartSquote
+    r['\''] = smartSingleQuote
     r['('] = smartParens
     if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 {
         r['-'] = smartDash

@@ -365,7 +365,7 @@ for ch := '1'; ch <= '9'; ch++ {

             r[ch] = smartNumberGeneric
         }
     }
-    r['<'] = smartLtag
+    r['<'] = smartLeftAngle
     r['`'] = smartBacktick
     return r
 }