grayfriday @ be082a1ef25d4211496762bbbdbab6df7e68c1f3

blackfriday fork with a few changes

markdown.go

//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)

const VERSION = "1.1"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                        // render tables
	EXTENSION_FENCED_CODE                   // render fenced code blocks
	EXTENSION_AUTOLINK                      // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                 // strikethrough text using ~~test~~
	EXTENSION_LAX_HTML_BLOCKS               // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                 // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK               // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES                     // Pandoc-style footnotes
)
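
// Example (editor's addition, not in the original source): extension values
// are bit flags, so callers OR them together and pass the result to Markdown.
// A minimal sketch using the HtmlRenderer constructor from this package:
//
//	extensions := EXTENSION_TABLES | EXTENSION_FENCED_CODE | EXTENSION_AUTOLINK
//	renderer := HtmlRenderer(HTML_USE_XHTML, "", "")
//	output := Markdown([]byte("see http://example.com/\n"), renderer, extensions)
//	_ = output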

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)

// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,

	// HTML5
	"video":      true,
	"aside":      true,
	"canvas":     true,
	"figure":     true,
	"footer":     true,
	"header":     true,
	"hgroup":     true,
	"output":     true,
	"article":    true,
	"section":    true,
	"progress":   true,
	"figcaption": true,
}

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently, Html and Latex implementations are provided.
type Renderer interface {
	// block-level callbacks
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, p *parser)

	// Span-level callbacks
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)
}
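
// Example (editor's addition, not in the original source): a sketch of the
// callback contract described above, for a hypothetical custom renderer. If
// the text callback returns false, the method rolls the output buffer back to
// where the element started. Only one method is shown; a real implementation
// must satisfy the full interface:
//
//	func (r *myRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
//		marker := out.Len()
//		out.WriteString("<p>")
//		if !text() {
//			out.Truncate(marker)
//			return
//		}
//		out.WriteString("</p>\n")
//	}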

// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int

// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	r              Renderer
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          int
	nesting        int
	maxNesting     int
	insideLink     bool

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}

//
//
// Public interface
//
//

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := HTML_USE_XHTML
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0

	return Markdown(input, renderer, extensions)
}

// MarkdownCommon is a convenience function for simple rendering.
// It calls Markdown with the most useful extensions enabled, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := 0
	htmlFlags |= HTML_USE_XHTML
	htmlFlags |= HTML_USE_SMARTYPANTS
	htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
	htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
	htmlFlags |= HTML_SKIP_SCRIPT
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0
	extensions |= EXTENSION_NO_INTRA_EMPHASIS
	extensions |= EXTENSION_TABLES
	extensions |= EXTENSION_FENCED_CODE
	extensions |= EXTENSION_AUTOLINK
	extensions |= EXTENSION_STRIKETHROUGH
	extensions |= EXTENSION_SPACE_HEADERS

	return Markdown(input, renderer, extensions)
}
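
// Example (editor's addition, not in the original source): typical use of the
// convenience wrappers above. A minimal sketch:
//
//	input := []byte("Check http://github.com/russross/blackfriday for details.")
//	html := MarkdownCommon(input) // autolinks the URL, applies smart punctuation
//	_ = html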

// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the render structure
	p := new(parser)
	p.r = renderer
	p.flags = extensions
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&EXTENSION_STRIKETHROUGH != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity

	if extensions&EXTENSION_AUTOLINK != 0 {
		p.inlineCallback[':'] = autoLink
	}

	if extensions&EXTENSION_FOOTNOTES != 0 {
		p.notes = make([]*reference, 0)
	}

	first := firstPass(p, input)
	second := secondPass(p, first)

	return second
}

// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			// add the line body if present
			if end > beg {
				expandTabs(&out, input[beg:end], tabSize)
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}
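
// Example (editor's addition, not in the original source): with default
// options and footnotes disabled, the first pass expands tabs, normalizes
// line endings to '\n', and consumes reference definitions (storing them in
// p.refs); the consumed reference line leaves a blank line behind in the
// copied text:
//
//	in:  "\tcode\n[ref]: http://example.com/\ntext\n"
//	out: "    code\n\ntext\n"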

// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	// NOTE: this is a big hack because we need the parser again for the
	// footnotes, so this can't really go in the public interface
	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, p)
	}

	p.r.DocumentFooter(&output)

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
//    This sentence needs a bit of further explanation.[^note]
//
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
//    Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.

// References are parsed and stored in this struct.
type reference struct {
	link   []byte
	title  []byte
	noteId int // 0 if not a footnote ref
}

// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	if p.flags&EXTENSION_FOOTNOTES != 0 {
		if data[i] == '^' {
			noteId = len(p.notes) + 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	// /:[ \t]*\n?[ \t]*/
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
	)

	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId > 0 {
		linkOffset, linkEnd, raw = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd // blockEnd returned by scanFootnote is already an offset into data
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId: noteId,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have titles
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
		p.notes = append(p.notes, ref)
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))
	p.refs[id] = ref
	return lineEnd
}
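
// Example (editor's addition, not in the original source): given the line
//
//	[Foo]: http://example.com/ "Example"
//
// isReference records a reference with link "http://example.com/" and title
// "Example" under the lowercased key "foo", and returns the number of bytes
// consumed so the first pass can skip past the definition.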

func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd = i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}

// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (it just returns the line if it's not).
// blockStart and blockEnd bound the section in the input buffer, and contents is
// the extracted text with one level of indentation removed. It will need to be
// rendered at the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte) {
	if i == 0 {
		return
	}

	// skip leading whitespace on first line
	for data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for data[i-1] != '\n' {
		if i >= len(data) {
			return
		}
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false
	hasBlock := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line, stopping at the end of the input
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			hasBlock = true
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])

		blockEnd = i
	}

	rawBytes := raw.Bytes()
	buf := new(bytes.Buffer)

	if hasBlock {
		p.block(buf, rawBytes)
	} else {
		p.inline(buf, rawBytes)
	}
	contents = buf.Bytes()

	return
}

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// The output never includes a trailing newline; the caller appends it.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}
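
// Example (editor's addition, not in the original source): expansion aligns
// to tab stops rather than inserting a fixed number of spaces, so a tab after
// two characters with tabSize 4 becomes two spaces:
//
//	var buf bytes.Buffer
//	expandTabs(&buf, []byte("ab\tcd"), 4)
//	// buf.String() == "ab  cd"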

// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}
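
// Example (editor's addition, not in the original source): with an indent
// size of 4, a leading tab counts as a 1-byte indent and four leading spaces
// count as 4; anything less is not indented:
//
//	isIndented([]byte("\tfoo"), 4)   // 1
//	isIndented([]byte("    foo"), 4) // 4
//	isIndented([]byte("  foo"), 4)   // 0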

// Create a url-safe slug for fragments
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	out := make([]byte, 0, len(in))
	sym := false

	for _, ch := range in {
		if isalnum(ch) {
			sym = false
			out = append(out, ch)
		} else if sym {
			continue
		} else {
			out = append(out, '-')
			sym = true
		}
	}
	var a, b int
	var ch byte
	for a, ch = range out {
		if ch != '-' {
			break
		}
	}
	for b = len(out) - 1; b > 0; b-- {
		if out[b] != '-' {
			break
		}
	}
	return out[a : b+1]
}
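
// Example (editor's addition, not in the original source): runs of
// non-alphanumeric bytes collapse to a single hyphen, and leading and
// trailing hyphens are trimmed:
//
//	slugify([]byte("Hello, World!")) // "Hello-World"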