Merge Parser into Processor
@@ -34,7 +34,7 @@
 // Parse block-level data.
 // Note: this function and many that it calls assume that
 // the input buffer ends with a newline.
-func (p *Parser) block(data []byte) {
+func (p *Processor) block(data []byte) {
 	// this is called recursively: enforce a maximum depth
 	if p.nesting >= p.maxNesting {
 		return
@@ -71,7 +71,7 @@ //
 		// % stuff
 		// % more stuff
 		// % even more stuff
-		if p.flags&Titleblock != 0 {
+		if p.extensions&Titleblock != 0 {
 			if data[0] == '%' {
 				if i := p.titleBlock(data, true); i > 0 {
 					data = data[i:]
@@ -109,7 +109,7 @@ // }
 		// return n * fact(n-1)
 		// }
 		// ```
-		if p.flags&FencedCode != 0 {
+		if p.extensions&FencedCode != 0 {
 			if i := p.fencedCodeBlock(data, true); i > 0 {
 				data = data[i:]
 				continue
@@ -147,7 +147,7 @@ // Name | Age | Phone
 		// ------|-----|---------
 		// Bob | 31 | 555-1234
 		// Alice | 27 | 555-4321
-		if p.flags&Tables != 0 {
+		if p.extensions&Tables != 0 {
			if i := p.table(data); i > 0 {
 				data = data[i:]
 				continue
@@ -182,7 +182,7 @@ // : Definition b
 		//
 		// Term 2
 		// : Definition c
-		if p.flags&DefinitionLists != 0 {
+		if p.extensions&DefinitionLists != 0 {
 			if p.dliPrefix(data) > 0 {
 				data = data[p.list(data, ListTypeDefinition):]
 				continue
@@ -197,19 +197,19 @@
 	p.nesting--
 }

-func (p *Parser) addBlock(typ NodeType, content []byte) *Node {
+func (p *Processor) addBlock(typ NodeType, content []byte) *Node {
 	p.closeUnmatchedBlocks()
 	container := p.addChild(typ, 0)
 	container.content = content
 	return container
 }

-func (p *Parser) isPrefixHeading(data []byte) bool {
+func (p *Processor) isPrefixHeading(data []byte) bool {
 	if data[0] != '#' {
 		return false
 	}

-	if p.flags&SpaceHeadings != 0 {
+	if p.extensions&SpaceHeadings != 0 {
 		level := 0
 		for level < 6 && level < len(data) && data[level] == '#' {
 			level++
@@ -221,7 +221,7 @@ }
 	return true
 }

-func (p *Parser) prefixHeading(data []byte) int {
+func (p *Processor) prefixHeading(data []byte) int {
 	level := 0
 	for level < 6 && level < len(data) && data[level] == '#' {
 		level++
@@ -230,7 +230,7 @@ i := skipChar(data, level, ' ')
 	end := skipUntilChar(data, i, '\n')
 	skip := end
 	id := ""
-	if p.flags&HeadingIDs != 0 {
+	if p.extensions&HeadingIDs != 0 {
 		j, k := 0, 0
 		// find start/end of heading id
 		for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
@@ -257,7 +257,7 @@ for end > 0 && data[end-1] == ' ' {
 		end--
 	}
 	if end > i {
-		if id == "" && p.flags&AutoHeadingIDs != 0 {
+		if id == "" && p.extensions&AutoHeadingIDs != 0 {
 			id = sanitized_anchor_name.Create(string(data[i:end]))
 		}
 		block := p.addBlock(Heading, data[i:end])
@@ -267,7 +267,7 @@ }
 	return skip
 }

-func (p *Parser) isUnderlinedHeading(data []byte) int {
+func (p *Processor) isUnderlinedHeading(data []byte) int {
 	// test of level 1 heading
 	if data[0] == '=' {
 		i := skipChar(data, 1, '=')
@@ -291,7 +291,7 @@
 	return 0
 }

-func (p *Parser) titleBlock(data []byte, doRender bool) int {
+func (p *Processor) titleBlock(data []byte, doRender bool) int {
 	if data[0] != '%' {
 		return 0
 	}
@@ -315,7 +315,7 @@
 	return consumed
 }

-func (p *Parser) html(data []byte, doRender bool) int {
+func (p *Processor) html(data []byte, doRender bool) int {
 	var i, j int

 	// identify the opening tag
@@ -419,7 +419,7 @@ block.content = nil
 }

 // HTML comment, lax form
-func (p *Parser) htmlComment(data []byte, doRender bool) int {
+func (p *Processor) htmlComment(data []byte, doRender bool) int {
 	i := p.inlineHTMLComment(data)
 	// needs to end with a blank line
 	if j := p.isEmpty(data[i:]); j > 0 {
@@ -439,7 +439,7 @@ return 0
 }

 // HR, which is the only self-closing block tag considered
-func (p *Parser) htmlHr(data []byte, doRender bool) int {
+func (p *Processor) htmlHr(data []byte, doRender bool) int {
 	if len(data) < 4 {
 		return 0
 	}
@@ -472,7 +472,7 @@ }
 	return 0
 }

-func (p *Parser) htmlFindTag(data []byte) (string, bool) {
+func (p *Processor) htmlFindTag(data []byte) (string, bool) {
 	i := 0
 	for i < len(data) && isalnum(data[i]) {
 		i++
@@ -484,7 +484,7 @@ }
 	return "", false
 }

-func (p *Parser) htmlFindEnd(tag string, data []byte) int {
+func (p *Processor) htmlFindEnd(tag string, data []byte) int {
 	// assume data[0] == '<' && data[1] == '/' already tested
 	if tag == "hr" {
 		return 2
@@ -508,7 +508,7 @@ if i >= len(data) {
 		return i
 	}

-	if p.flags&LaxHTMLBlocks != 0 {
+	if p.extensions&LaxHTMLBlocks != 0 {
 		return i
 	}
 	if skip = p.isEmpty(data[i:]); skip == 0 {
@@ -519,7 +519,7 @@
 	return i + skip
 }

-func (*Parser) isEmpty(data []byte) int {
+func (*Processor) isEmpty(data []byte) int {
 	// it is okay to call isEmpty on an empty buffer
 	if len(data) == 0 {
 		return 0
@@ -537,7 +537,7 @@ }
 	return i
 }

-func (*Parser) isHRule(data []byte) bool {
+func (*Processor) isHRule(data []byte) bool {
 	i := 0

 	// skip up to three spaces
@@ -667,7 +667,7 @@
 // fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
 // or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
 // If doRender is true, a final newline is mandatory to recognize the fenced code block.
-func (p *Parser) fencedCodeBlock(data []byte, doRender bool) int {
+func (p *Processor) fencedCodeBlock(data []byte, doRender bool) int {
 	var syntax string
 	beg, marker := isFenceLine(data, &syntax, "")
 	if beg == 0 || beg >= len(data) {
@@ -739,7 +739,7 @@ }
 	block.content = nil
 }

-func (p *Parser) table(data []byte) int {
+func (p *Processor) table(data []byte) int {
 	table := p.addBlock(Table, nil)
 	i, columns := p.tableHeader(data)
 	if i == 0 {
@@ -782,7 +782,7 @@ }
 	return backslashes&1 == 1
 }

-func (p *Parser) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
+func (p *Processor) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
 	i := 0
 	colCount := 1
 	for i = 0; i < len(data) && data[i] != '\n'; i++ {
@@ -895,7 +895,7 @@ }
 	return
 }

-func (p *Parser) tableRow(data []byte, columns []CellAlignFlags, header bool) {
+func (p *Processor) tableRow(data []byte, columns []CellAlignFlags, header bool) {
 	p.addBlock(TableRow, nil)
 	i, col := 0, 0

@@ -939,7 +939,7 @@ // silently ignore rows with too many cells
 }

 // returns blockquote prefix length
-func (p *Parser) quotePrefix(data []byte) int {
+func (p *Processor) quotePrefix(data []byte) int {
 	i := 0
 	for i < 3 && i < len(data) && data[i] == ' ' {
 		i++
@@ -955,7 +955,7 @@ }

 // blockquote ends with at least one blank line
 // followed by something without a blockquote prefix
-func (p *Parser) terminateBlockquote(data []byte, beg, end int) bool {
+func (p *Processor) terminateBlockquote(data []byte, beg, end int) bool {
 	if p.isEmpty(data[beg:]) <= 0 {
 		return false
 	}
@@ -966,7 +966,7 @@ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
 }

 // parse a blockquote fragment
-func (p *Parser) quote(data []byte) int {
+func (p *Processor) quote(data []byte) int {
 	block := p.addBlock(BlockQuote, nil)
 	var raw bytes.Buffer
 	beg, end := 0, 0
@@ -976,7 +976,7 @@ // Step over whole lines, collecting them. While doing that, check for
 		// fenced code and if one's found, incorporate it altogether,
 		// irregardless of any contents inside it
 		for end < len(data) && data[end] != '\n' {
-			if p.flags&FencedCode != 0 {
+			if p.extensions&FencedCode != 0 {
 				if i := p.fencedCodeBlock(data[end:], false); i > 0 {
 					// -1 to compensate for the extra end++ after the loop:
 					end += i - 1
@@ -1004,7 +1004,7 @@ return end
 }

 // returns prefix length for block code
-func (p *Parser) codePrefix(data []byte) int {
+func (p *Processor) codePrefix(data []byte) int {
 	if len(data) >= 1 && data[0] == '\t' {
 		return 1
 	}
@@ -1014,7 +1014,7 @@ }
 	return 0
 }

-func (p *Parser) code(data []byte) int {
+func (p *Processor) code(data []byte) int {
 	var work bytes.Buffer

 	i := 0
@@ -1064,7 +1064,7 @@ return i
 }

 // returns unordered list item prefix
-func (p *Parser) uliPrefix(data []byte) int {
+func (p *Processor) uliPrefix(data []byte) int {
 	i := 0
 	// start with up to 3 spaces
 	for i < len(data) && i < 3 && data[i] == ' ' {
@@ -1082,7 +1082,7 @@ return i + 2
 }

 // returns ordered list item prefix
-func (p *Parser) oliPrefix(data []byte) int {
+func (p *Processor) oliPrefix(data []byte) int {
 	i := 0

 	// start with up to 3 spaces
@@ -1107,7 +1107,7 @@ return i + 2
 }

 // returns definition list item prefix
-func (p *Parser) dliPrefix(data []byte) int {
+func (p *Processor) dliPrefix(data []byte) int {
 	if len(data) < 2 {
 		return 0
 	}
@@ -1123,7 +1123,7 @@ return i + 2
 }

 // parse ordered or unordered list block
-func (p *Parser) list(data []byte, flags ListType) int {
+func (p *Processor) list(data []byte, flags ListType) int {
 	i := 0
 	flags |= ListItemBeginningOfList
 	block := p.addBlock(List, nil)
@@ -1191,7 +1191,7 @@ }

 // Parse a single list item.
 // Assumes initial prefix is already removed if this is a sublist.
-func (p *Parser) listItem(data []byte, flags *ListType) int {
+func (p *Processor) listItem(data []byte, flags *ListType) int {
 	// keep track of the indentation of the first line
 	itemIndent := 0
 	if data[0] == '\t' {
@@ -1383,7 +1383,7 @@ return line
 }

 // render a single paragraph that has already been parsed out
-func (p *Parser) renderParagraph(data []byte) {
+func (p *Processor) renderParagraph(data []byte) {
 	if len(data) == 0 {
 		return
 	}
@@ -1408,13 +1408,13 @@
 	p.addBlock(Paragraph, data[beg:end])
 }

-func (p *Parser) paragraph(data []byte) int {
+func (p *Processor) paragraph(data []byte) int {
 	// prev: index of 1st char of previous line
 	// line: index of 1st char of current line
 	// i: index of cursor/end of current line
 	var prev, line, i int
 	tabSize := TabSizeDefault
-	if p.flags&TabSizeEight != 0 {
+	if p.extensions&TabSizeEight != 0 {
 		tabSize = TabSizeDouble
 	}
 	// keep going until we find something to mark the end of the paragraph
@@ -1435,7 +1435,7 @@
 		// did we find a blank line marking the end of the paragraph?
 		if n := p.isEmpty(current); n > 0 {
 			// did this blank line followed by a definition list item?
-			if p.flags&DefinitionLists != 0 {
+			if p.extensions&DefinitionLists != 0 {
 				if i < len(data)-1 && data[i+1] == ':' {
 					return p.list(data[prev:], ListTypeDefinition)
 				}
@@ -1461,7 +1461,7 @@ eol--
 				}

 				id := ""
-				if p.flags&AutoHeadingIDs != 0 {
+				if p.extensions&AutoHeadingIDs != 0 {
 					id = sanitized_anchor_name.Create(string(data[prev:eol]))
 				}

@@ -1478,7 +1478,7 @@ }
 		}

 		// if the next line starts a block of HTML, then the paragraph ends here
-		if p.flags&LaxHTMLBlocks != 0 {
+		if p.extensions&LaxHTMLBlocks != 0 {
 			if data[i] == '<' && p.html(current, false) > 0 {
 				// rewind to before the HTML block
 				p.renderParagraph(data[:i])
@@ -1493,7 +1493,7 @@ return i
 		}

 		// if there's a fenced code block, paragraph is over
-		if p.flags&FencedCode != 0 {
+		if p.extensions&FencedCode != 0 {
 			if p.fencedCodeBlock(current, false) > 0 {
 				p.renderParagraph(data[:i])
 				return i
@@ -1501,7 +1501,7 @@ }
 		}

 		// if there's a definition list item, prev line is a definition term
-		if p.flags&DefinitionLists != 0 {
+		if p.extensions&DefinitionLists != 0 {
 			if p.dliPrefix(current) != 0 {
 				ret := p.list(data[prev:], ListTypeDefinition)
 				return ret
@@ -1509,7 +1509,7 @@ }
 		}

 		// if there's a list after this, paragraph is over
-		if p.flags&NoEmptyLineBeforeBlock != 0 {
+		if p.extensions&NoEmptyLineBeforeBlock != 0 {
 			if p.uliPrefix(current) != 0 ||
 				p.oliPrefix(current) != 0 ||
 				p.quotePrefix(current) != 0 ||
@@ -32,7 +32,7 @@ // Each function returns the number of chars taken care of
 // data is the complete block being rendered
 // offset is the number of valid chars before the current cursor

-func (p *Parser) inline(currBlock *Node, data []byte) {
+func (p *Processor) inline(currBlock *Node, data []byte) {
 	// handlers might call us recursively: enforce a maximum depth
 	if p.nesting >= p.maxNesting || len(data) == 0 {
 		return
@@ -69,7 +69,7 @@ p.nesting--
 }

 // single and double emphasis parsing
-func emphasis(p *Parser, data []byte, offset int) (int, *Node) {
+func emphasis(p *Processor, data []byte, offset int) (int, *Node) {
 	data = data[offset:]
 	c := data[0]

@@ -114,7 +114,7 @@
 	return 0, nil
 }

-func codeSpan(p *Parser, data []byte, offset int) (int, *Node) {
+func codeSpan(p *Processor, data []byte, offset int) (int, *Node) {
 	data = data[offset:]
 	nb := 0

@@ -161,7 +161,7 @@ return end, nil
 }

 // newline preceded by two spaces becomes <br>
-func maybeLineBreak(p *Parser, data []byte, offset int) (int, *Node) {
+func maybeLineBreak(p *Processor, data []byte, offset int) (int, *Node) {
 	origOffset := offset
 	for offset < len(data) && data[offset] == ' ' {
 		offset++
@@ -177,8 +177,8 @@ return 0, nil
 }

 // newline without two spaces works when HardLineBreak is enabled
-func lineBreak(p *Parser, data []byte, offset int) (int, *Node) {
-	if p.flags&HardLineBreak != 0 {
+func lineBreak(p *Processor, data []byte, offset int) (int, *Node) {
+	if p.extensions&HardLineBreak != 0 {
 		return 1, NewNode(Hardbreak)
 	}
 	return 0, nil
@@ -200,14 +200,14 @@ }
 	return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
 }

-func maybeImage(p *Parser, data []byte, offset int) (int, *Node) {
+func maybeImage(p *Processor, data []byte, offset int) (int, *Node) {
 	if offset < len(data)-1 && data[offset+1] == '[' {
 		return link(p, data, offset)
 	}
 	return 0, nil
 }

-func maybeInlineFootnote(p *Parser, data []byte, offset int) (int, *Node) {
+func maybeInlineFootnote(p *Processor, data []byte, offset int) (int, *Node) {
 	if offset < len(data)-1 && data[offset+1] == '[' {
 		return link(p, data, offset)
 	}
@@ -215,7 +215,7 @@ return 0, nil
 }

 // '[': parse a link or an image or a footnote
-func link(p *Parser, data []byte, offset int) (int, *Node) {
+func link(p *Processor, data []byte, offset int) (int, *Node) {
 	// no links allowed inside regular links, footnote, and deferred footnotes
 	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
 		return 0, nil
@@ -225,7 +225,7 @@ var t linkType
 	switch {
 	// special case: ![^text] == deferred footnote (that follows something with
 	// an exclamation point)
-	case p.flags&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
+	case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
 		t = linkDeferredFootnote
 	// ![alt] == image
 	case offset >= 0 && data[offset] == '!':
@@ -233,7 +233,7 @@ t = linkImg
 		offset++
 	// ^[text] == inline footnote
 	// [^refId] == deferred footnote
-	case p.flags&Footnotes != 0:
+	case p.extensions&Footnotes != 0:
 		if offset >= 0 && data[offset] == '^' {
 			t = linkInlineFootnote
 			offset++
@@ -573,7 +573,7 @@
 	return i, linkNode
 }

-func (p *Parser) inlineHTMLComment(data []byte) int {
+func (p *Processor) inlineHTMLComment(data []byte) int {
 	if len(data) < 5 {
 		return 0
 	}
@@ -613,7 +613,7 @@ emailAutolink
 )

 // '<' when tags or autolinks are allowed
-func leftAngle(p *Parser, data []byte, offset int) (int, *Node) {
+func leftAngle(p *Processor, data []byte, offset int) (int, *Node) {
 	data = data[offset:]
 	altype, end := tagLength(data)
 	if size := p.inlineHTMLComment(data); size > 0 {
@@ -646,11 +646,11 @@
 // '\\' backslash escape
 var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")

-func escape(p *Parser, data []byte, offset int) (int, *Node) {
+func escape(p *Processor, data []byte, offset int) (int, *Node) {
 	data = data[offset:]

 	if len(data) > 1 {
-		if p.flags&BackslashLineBreak != 0 && data[1] == '\n' {
+		if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
 			return 2, NewNode(Hardbreak)
 		}
 		if bytes.IndexByte(escapeChars, data[1]) < 0 {
@@ -686,7 +686,7 @@ }
 // '&' escaped when it doesn't belong to an entity
 // valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
-func entity(p *Parser, data []byte, offset int) (int, *Node) {
+func entity(p *Processor, data []byte, offset int) (int, *Node) {
 	data = data[offset:]

 	end := 1

@@ -748,7 +748,7 @@ }
 const shortestPrefix = 6 // len("ftp://"), the shortest of the above

-func maybeAutoLink(p *Parser, data []byte, offset int) (int, *Node) {
+func maybeAutoLink(p *Processor, data []byte, offset int) (int, *Node) {
 	// quick check to rule out most false hits
 	if p.insideLink || len(data) < offset+shortestPrefix {
 		return 0, nil
@@ -765,7 +765,7 @@ }
 	return 0, nil
 }

-func autoLink(p *Parser, data []byte, offset int) (int, *Node) {
+func autoLink(p *Processor, data []byte, offset int) (int, *Node) {
 	// Now a more expensive check to see if we're not inside an anchor element
 	anchorStart := offset
 	offsetFromAnchor := 0
@@ -1095,7 +1095,7 @@ }
 	return 0
 }

-func helperEmphasis(p *Parser, data []byte, c byte) (int, *Node) {
+func helperEmphasis(p *Processor, data []byte, c byte) (int, *Node) {
 	i := 0

 	// skip one symbol if coming from emph3
@@ -1120,7 +1120,7 @@ }

 		if data[i] == c && !isspace(data[i-1]) {

-			if p.flags&NoIntraEmphasis != 0 {
+			if p.extensions&NoIntraEmphasis != 0 {
 				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
 					continue
 				}
@@ -1135,7 +1135,7 @@
 	return 0, nil
 }

-func helperDoubleEmphasis(p *Parser, data []byte, c byte) (int, *Node) {
+func helperDoubleEmphasis(p *Processor, data []byte, c byte) (int, *Node) {
 	i := 0

 	for i < len(data) {
@@ -1159,7 +1159,7 @@ }
 	return 0, nil
 }

-func helperTripleEmphasis(p *Parser, data []byte, offset int, c byte) (int, *Node) {
+func helperTripleEmphasis(p *Processor, data []byte, offset int, c byte) (int, *Node) {
 	i := 0
 	origData := data
 	data = data[offset:]
@@ -154,18 +154,19 @@ }

 // Callback functions for inline parsing. One such function is defined
 // for each character that triggers a response when parsing inline data.
-type inlineParser func(p *Parser, data []byte, offset int) (int, *Node)
+type inlineParser func(p *Processor, data []byte, offset int) (int, *Node)

-// Parser holds runtime state used by the parser.
+// Processor holds runtime state used by the parser.
 // This is constructed by the Markdown function.
-type Parser struct {
-	refOverride    ReferenceOverrideFunc
-	refs           map[string]*reference
-	inlineCallback [256]inlineParser
-	flags          Extensions
-	nesting        int
-	maxNesting     int
-	insideLink     bool
+type Processor struct {
+	renderer          Renderer
+	referenceOverride ReferenceOverrideFunc
+	refs              map[string]*reference
+	inlineCallback    [256]inlineParser
+	extensions        Extensions
+	nesting           int
+	maxNesting        int
+	insideLink        bool

 	// Footnotes need to be ordered as well as available to quickly check for
 	// presence. If a ref is also a footnote, it's stored both in refs and here
@@ -179,9 +180,9 @@ lastMatchedContainer *Node // = doc
 	allClosed bool
 }

-func (p *Parser) getRef(refid string) (ref *reference, found bool) {
-	if p.refOverride != nil {
-		r, overridden := p.refOverride(refid)
+func (p *Processor) getRef(refid string) (ref *reference, found bool) {
+	if p.referenceOverride != nil {
+		r, overridden := p.referenceOverride(refid)
 		if overridden {
 			if r == nil {
 				return nil, false
@@ -199,17 +200,17 @@ ref, found = p.refs[strings.ToLower(refid)]
 	return ref, found
 }

-func (p *Parser) finalize(block *Node) {
+func (p *Processor) finalize(block *Node) {
 	above := block.Parent
 	block.open = false
 	p.tip = above
 }

-func (p *Parser) addChild(node NodeType, offset uint32) *Node {
+func (p *Processor) addChild(node NodeType, offset uint32) *Node {
 	return p.addExistingChild(NewNode(node), offset)
 }

-func (p *Parser) addExistingChild(node *Node, offset uint32) *Node {
+func (p *Processor) addExistingChild(node *Node, offset uint32) *Node {
 	for !p.tip.canContain(node.Type) {
 		p.finalize(p.tip)
 	}
@@ -218,7 +219,7 @@ p.tip = node
 	return node
 }

-func (p *Parser) closeUnmatchedBlocks() {
+func (p *Processor) closeUnmatchedBlocks() {
 	if !p.allClosed {
 		for p.oldTip != p.lastMatchedContainer {
 			parent := p.oldTip.Parent
@@ -253,32 +254,13 @@ // nil. If overridden is false, the default reference logic will be executed.
 // See the documentation in Options for more details on use-case.
 type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)

-// Processor contains all the state necessary for Blackfriday to operate.
-type Processor struct {
-	r                 Renderer
-	extensions        Extensions
-	referenceOverride ReferenceOverrideFunc
-}
-
-// DefaultProcessor creates the processor tuned to the most common behavior.
-func DefaultProcessor() *Processor {
-	return &Processor{
-		r: NewHTMLRenderer(HTMLRendererParameters{
-			Flags: CommonHTMLFlags,
-		}),
-		extensions: CommonExtensions,
-	}
-}
-
-// NewParser constructs a Parser. You can use the same With* functions as for
+// NewProcessor constructs a Parser. You can use the same With* functions as for
 // Markdown() to customize parser's behavior.
-func (proc *Processor) NewParser(opts ...Option) *Parser {
+func NewProcessor(opts ...Option) *Processor {
+	var p Processor
 	for _, opt := range opts {
-		opt(proc)
+		opt(&p)
 	}
-	var p Parser
-	p.flags = proc.extensions
-	p.refOverride = proc.referenceOverride
 	p.refs = make(map[string]*reference)
 	p.maxNesting = 16
 	p.insideLink = false
@@ -292,7 +274,7 @@ // register inline parsers
 	p.inlineCallback[' '] = maybeLineBreak
 	p.inlineCallback['*'] = emphasis
 	p.inlineCallback['_'] = emphasis
-	if proc.extensions&Strikethrough != 0 {
+	if p.extensions&Strikethrough != 0 {
 		p.inlineCallback['~'] = emphasis
 	}
 	p.inlineCallback['`'] = codeSpan
@@ -303,7 +285,7 @@ p.inlineCallback['\\'] = escape
 	p.inlineCallback['&'] = entity
 	p.inlineCallback['!'] = maybeImage
 	p.inlineCallback['^'] = maybeInlineFootnote
-	if proc.extensions&Autolink != 0 {
+	if p.extensions&Autolink != 0 {
 		p.inlineCallback['h'] = maybeAutoLink
 		p.inlineCallback['m'] = maybeAutoLink
 		p.inlineCallback['f'] = maybeAutoLink
@@ -311,7 +293,7 @@ p.inlineCallback['H'] = maybeAutoLink
 		p.inlineCallback['M'] = maybeAutoLink
 		p.inlineCallback['F'] = maybeAutoLink
 	}
-	if proc.extensions&Footnotes != 0 {
+	if p.extensions&Footnotes != 0 {
 		p.notes = make([]*reference, 0)
 	}
 	return &p
@@ -323,7 +305,7 @@
 // WithRenderer allows you to override the default renderer.
 func WithRenderer(r Renderer) Option {
 	return func(p *Processor) {
-		p.r = r
+		p.renderer = r
 	}
 }

@@ -339,7 +321,7 @@ // WithNoExtensions turns off all extensions and custom behavior.
 func WithNoExtensions() Option {
 	return func(p *Processor) {
 		p.extensions = NoExtensions
-		p.r = NewHTMLRenderer(HTMLRendererParameters{
+		p.renderer = NewHTMLRenderer(HTMLRendererParameters{
 			Flags: HTMLFlagsNone,
 		})
 	}
@@ -385,16 +367,20 @@ // former:
 // output := Markdown(input, WithNoExtensions(), WithExtensions(exts),
 // WithRenderer(yourRenderer))
 func Markdown(input []byte, opts ...Option) []byte {
-	p := DefaultProcessor()
-	parser := p.NewParser(opts...)
-	return p.r.Render(parser.Parse(input))
+	r := NewHTMLRenderer(HTMLRendererParameters{
+		Flags: CommonHTMLFlags,
+	})
+	optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
+	optList = append(optList, opts...)
+	parser := NewProcessor(optList...)
+	return parser.renderer.Render(parser.Parse(input))
 }

 // Parse is an entry point to the parsing part of Blackfriday. It takes an
 // input markdown document and produces a syntax tree for its contents. This
 // tree can then be rendered with a default or custom renderer, or
 // analyzed/transformed by the caller to whatever non-standard needs they have.
-func (p *Parser) Parse(input []byte) *Node {
+func (p *Processor) Parse(input []byte) *Node {
 	p.block(input)
 	// Walk the tree and finish up some of unfinished blocks
 	for p.tip != nil {
@@ -412,8 +398,8 @@ p.parseRefsToAST()
 	return p.doc
 }

-func (p *Parser) parseRefsToAST() {
-	if p.flags&Footnotes == 0 || len(p.notes) == 0 {
+func (p *Processor) parseRefsToAST() {
+	if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
 		return
 	}
 	p.tip = p.doc
@@ -537,7 +523,7 @@ // If so, it is parsed and stored in the list of references
 // (in the render struct).
 // Returns the number of bytes to skip to move past it,
 // or zero if the first line is not a reference.
-func isReference(p *Parser, data []byte, tabSize int) int {
+func isReference(p *Processor, data []byte, tabSize int) int {
 	// up to 3 optional leading spaces
 	if len(data) < 4 {
 		return 0
@@ -554,7 +540,7 @@ if data[i] != '[' {
 		return 0
 	}
 	i++
-	if p.flags&Footnotes != 0 {
+	if p.extensions&Footnotes != 0 {
 		if i < len(data) && data[i] == '^' {
 			// we can set it to anything here because the proper noteIds will
 			// be assigned later during the second pass. It just has to be != 0
@@ -605,7 +591,7 @@ raw []byte
 		hasBlock bool
 	)

-	if p.flags&Footnotes != 0 && noteID != 0 {
+	if p.extensions&Footnotes != 0 && noteID != 0 {
 		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
 		lineEnd = linkEnd
 	} else {
@@ -640,7 +626,7 @@
 	return lineEnd
 }

-func scanLinkRef(p *Parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
+func scanLinkRef(p *Processor, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
 	// link: whitespace-free sequence, optionally between angle brackets
 	if data[i] == '<' {
 		i++
@@ -714,7 +700,7 @@ // over by one tab if it is indeed a block (just returns the line if it's not).
 // blockEnd is the end of the section in the input buffer, and contents is the
 // extracted text that was shifted over one tab. It will need to be rendered at
 // the end of the document.
-func scanFootnote(p *Parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
+func scanFootnote(p *Processor, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
 	if i == 0 || len(data) == 0 {
 		return
 	}
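Below is a minimal usage sketch of the API after this merge, assembled only from the signatures visible in the hunks above (Markdown, NewProcessor, WithRenderer, WithExtensions, NewHTMLRenderer, Parse, Render). It is illustrative rather than authoritative; in particular the import path is an assumption and may differ on this development branch.

package main

import (
	"fmt"

	// assumed import path for the v2 work in progress; adjust as needed
	"github.com/russross/blackfriday"
)

func main() {
	input := []byte("# Title\n\nSome *Markdown* text.\n")

	// One-shot path: Markdown builds a Processor with the default HTML
	// renderer and CommonExtensions, then parses and renders the input.
	fmt.Println(string(blackfriday.Markdown(input)))

	// Long form: construct the merged Processor explicitly, parse to an
	// AST, then render that AST with the same renderer instance.
	renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags,
	})
	p := blackfriday.NewProcessor(
		blackfriday.WithRenderer(renderer),
		blackfriday.WithExtensions(blackfriday.CommonExtensions),
	)
	ast := p.Parse(input)
	fmt.Println(string(renderer.Render(ast)))
}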