grayfriday @ 82ba58501c306edf762b90c8579fb6923a6520b9

blackfriday fork with a few changes

markdown.go

//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)

const VERSION = "1.1"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                        // render tables
	EXTENSION_FENCED_CODE                   // render fenced code blocks
	EXTENSION_AUTOLINK                      // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                 // strikethrough text using ~~text~~
	EXTENSION_LAX_HTML_BLOCKS               // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                 // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK               // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                // expand tabs to eight spaces instead of four
)
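
// For example, a caller that wants just tables and fenced code blocks would
// combine the flags like this (a usage sketch, not part of the package API;
// the variable names are illustrative):
//
//	extensions := EXTENSION_TABLES | EXTENSION_FENCED_CODE
//	output := Markdown(input, HtmlRenderer(HTML_USE_XHTML, "", ""), extensions)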

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)

// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,
}

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently, Html and Latex implementations are provided.
type Renderer interface {
	// block-level callbacks
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableCell(out *bytes.Buffer, text []byte, flags int)

	// Span-level callbacks
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)

	// Low-level callbacks
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)
}
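
// As a sketch of the callback contract described above (the renderer type and
// method body here are illustrative, not part of this package), a typical
// HTML Paragraph implementation follows this pattern:
//
//	func (r *myRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
//		marker := out.Len()
//		out.WriteString("<p>")
//		if !text() {
//			// the callback failed: discard everything written since marker
//			out.Truncate(marker)
//			return
//		}
//		out.WriteString("</p>\n")
//	}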

// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int

// parser holds the runtime state used while parsing and rendering.
// It is constructed by the Markdown function.
type parser struct {
	r              Renderer
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          int
	nesting        int
	maxNesting     int
	insideLink     bool
}

//
//
// Public interface
//
//

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := HTML_USE_XHTML
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0

	return Markdown(input, renderer, extensions)
}
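
// Typical use, seen from a caller (a sketch; the input here is illustrative):
//
//	output := MarkdownBasic([]byte("# Title\n\nSome *emphasized* text.\n"))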

// MarkdownCommon is a convenience function for simple rendering.
// It calls Markdown with the most useful extensions enabled, processing the
// input with common extensions, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := 0
	htmlFlags |= HTML_USE_XHTML
	htmlFlags |= HTML_USE_SMARTYPANTS
	htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
	htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0
	extensions |= EXTENSION_NO_INTRA_EMPHASIS
	extensions |= EXTENSION_TABLES
	extensions |= EXTENSION_FENCED_CODE
	extensions |= EXTENSION_AUTOLINK
	extensions |= EXTENSION_STRIKETHROUGH
	extensions |= EXTENSION_SPACE_HEADERS

	return Markdown(input, renderer, extensions)
}

// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the parser structure
	p := new(parser)
	p.r = renderer
	p.flags = extensions
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&EXTENSION_STRIKETHROUGH != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity

	if extensions&EXTENSION_AUTOLINK != 0 {
		p.inlineCallback[':'] = autoLink
	}

	first := firstPass(p, input)
	second := secondPass(p, first)

	return second
}
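
// Rendering to LaTeX instead of HTML uses the same entry point with the LaTeX
// renderer (a sketch; see LatexRenderer in this package for its exact
// constructor arguments):
//
//	renderer := LatexRenderer(0)
//	output := Markdown(input, renderer, EXTENSION_TABLES|EXTENSION_FENCED_CODE)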

// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:]); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			// add the line body if present
			if end > beg {
				expandTabs(&out, input[beg:end], tabSize)
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}
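
// As an illustration of the first pass (the input below is hypothetical), the
// text
//
//	"[id]: http://example.com/\n\tindented line\r\n"
//
// comes out with the reference definition recorded in p.refs and dropped from
// the copied text, the leading tab expanded to spaces, and the "\r\n"
// normalized to a single "\n".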

// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)
	p.r.DocumentFooter(&output)

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.

// References are parsed and stored in this struct.
type reference struct {
	link  []byte
	title []byte
}

// Check whether data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the parser struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset := i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd := i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return 0
	}

	// compute end-of-line
	lineEnd := 0
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in single quotes,
	// double quotes, or parentheses, alone on its line
	titleOffset, titleEnd := 0, 0
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}
	if lineEnd == 0 { // garbage after the link
		return 0
	}

	// a valid ref has been found

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))
	p.refs[id] = &reference{
		link:  data[linkOffset:linkEnd],
		title: data[titleOffset:titleEnd],
	}

	return lineEnd
}
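
// For example (an illustrative input, not a test fixture), a block beginning
// with the line
//
//	[1]: http://example.com/ "Example"
//
// causes isReference to record id "1" with that link and title in p.refs and
// to return the number of bytes up to the end of that line, so firstPass
// skips past it.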

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// The trailing newline is added by the caller (firstPass).
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}
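
// For example (illustrative inputs; buf is a bytes.Buffer), with the default
// tab size of 4:
//
//	expandTabs(&buf, []byte("\tfoo"), 4) // writes "    foo"
//	expandTabs(&buf, []byte("a\tb"), 4)  // writes "a   b" (pads to the next tab stop)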