// markdown.go
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)
22
// VERSION is the version string reported by this library.
const VERSION = "0.6"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota
	EXTENSION_TABLES
	EXTENSION_FENCED_CODE
	EXTENSION_AUTOLINK
	EXTENSION_STRIKETHROUGH
	EXTENSION_LAX_HTML_BLOCKS
	EXTENSION_SPACE_HEADERS
	EXTENSION_HARD_LINE_BREAK
	EXTENSION_TAB_SIZE_EIGHT
)

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the listitem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	// center alignment is the combination of left and right
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop, in columns (see expandTabs).
const (
	TAB_SIZE_DEFAULT = 4 // used unless EXTENSION_TAB_SIZE_EIGHT is set
	TAB_SIZE_EIGHT   = 8
)
72
// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,
}
99
// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
// Currently Html and Latex implementations are provided.
//
// NOTE(review): Header, List, and Paragraph receive their content lazily via
// a text func() bool rather than a []byte — confirm the callback's exact
// contract (when it is invoked and what its bool result means) against the
// Html/Latex implementations.
type Renderer interface {
	// Block-level callbacks: one call per block element.
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableCell(out *bytes.Buffer, text []byte, flags int)

	// Span-level callbacks: inline elements within a block.
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)

	// Low-level callbacks: entities and plain text runs.
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer: emitted once, around the whole document
	// (see secondPass).
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)
}
137
// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data
// (registered in Markdown via parser.inline).
// NOTE(review): the int return is presumably the number of input bytes
// consumed — confirm in the inline* implementations.
type inlineParser func(parser *Parser, out *bytes.Buffer, data []byte, offset int) int
141
// The main parser object.
// This is constructed by the Markdown function and
// contains state used during the parsing process.
type Parser struct {
	r          Renderer              // output renderer supplied by the caller
	refs       map[string]*reference // link references gathered by isReference during the first pass; keys are lowercased ids
	inline     [256]inlineParser     // inline handlers indexed by the byte that triggers them
	flags      int                   // EXTENSION_* bits passed to Markdown
	nesting    int                   // current nesting depth; secondPass panics if it is nonzero at the end
	maxNesting int                   // nesting limit (set to 16 by Markdown)
	insideLink bool                  // presumably guards against links nested inside link text — usage is outside this file, confirm
}
154
155
//
//
// Public interface
//
//
161
162// Call Markdown with no extensions
163func MarkdownBasic(input []byte) []byte {
164 // set up the HTML renderer
165 htmlFlags := HTML_USE_XHTML
166 renderer := HtmlRenderer(htmlFlags, "", "")
167
168 // set up the parser
169 extensions := 0
170
171 return Markdown(input, renderer, extensions)
172}
173
174// Call Markdown with most useful extensions enabled
175func MarkdownCommon(input []byte) []byte {
176 // set up the HTML renderer
177 htmlFlags := 0
178 htmlFlags |= HTML_USE_XHTML
179 htmlFlags |= HTML_USE_SMARTYPANTS
180 htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
181 htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
182 renderer := HtmlRenderer(htmlFlags, "", "")
183
184 // set up the parser
185 extensions := 0
186 extensions |= EXTENSION_NO_INTRA_EMPHASIS
187 extensions |= EXTENSION_TABLES
188 extensions |= EXTENSION_FENCED_CODE
189 extensions |= EXTENSION_AUTOLINK
190 extensions |= EXTENSION_STRIKETHROUGH
191 extensions |= EXTENSION_SPACE_HEADERS
192
193 return Markdown(input, renderer, extensions)
194}
195
196// Parse and render a block of markdown-encoded text.
197// The renderer is used to format the output, and extensions dictates which
198// non-standard extensions are enabled.
199func Markdown(input []byte, renderer Renderer, extensions int) []byte {
200 // no point in parsing if we can't render
201 if renderer == nil {
202 return nil
203 }
204
205 // fill in the render structure
206 parser := new(Parser)
207 parser.r = renderer
208 parser.flags = extensions
209 parser.refs = make(map[string]*reference)
210 parser.maxNesting = 16
211 parser.insideLink = false
212
213 // register inline parsers
214 parser.inline['*'] = inlineEmphasis
215 parser.inline['_'] = inlineEmphasis
216 if extensions&EXTENSION_STRIKETHROUGH != 0 {
217 parser.inline['~'] = inlineEmphasis
218 }
219 parser.inline['`'] = inlineCodeSpan
220 parser.inline['\n'] = inlineLineBreak
221 parser.inline['['] = inlineLink
222 parser.inline['<'] = inlineLAngle
223 parser.inline['\\'] = inlineEscape
224 parser.inline['&'] = inlineEntity
225
226 if extensions&EXTENSION_AUTOLINK != 0 {
227 parser.inline[':'] = inlineAutoLink
228 }
229
230 first := firstPass(parser, input)
231 second := secondPass(parser, first)
232
233 return second
234}
235
236// first pass:
237// - extract references
238// - expand tabs
239// - normalize newlines
240// - copy everything else
241func firstPass(parser *Parser, input []byte) []byte {
242 var out bytes.Buffer
243 tabSize := TAB_SIZE_DEFAULT
244 if parser.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
245 tabSize = TAB_SIZE_EIGHT
246 }
247 beg, end := 0, 0
248 for beg < len(input) { // iterate over lines
249 if end = isReference(parser, input[beg:]); end > 0 {
250 beg += end
251 } else { // skip to the next line
252 end = beg
253 for end < len(input) && input[end] != '\n' && input[end] != '\r' {
254 end++
255 }
256
257 // add the line body if present
258 if end > beg {
259 expandTabs(&out, input[beg:end], tabSize)
260 }
261 out.WriteByte('\n')
262
263 if end < len(input) && input[end] == '\r' {
264 end++
265 }
266 if end < len(input) && input[end] == '\n' {
267 end++
268 }
269
270 beg = end
271 }
272 }
273 return out.Bytes()
274}
275
276// second pass: actual rendering
277func secondPass(parser *Parser, input []byte) []byte {
278 var output bytes.Buffer
279
280 parser.r.DocumentHeader(&output)
281 parser.parseBlock(&output, input)
282 parser.r.DocumentFooter(&output)
283
284 if parser.nesting != 0 {
285 panic("Nesting level did not end at zero")
286 }
287
288 return output.Bytes()
289}
290
291
//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
306
// References are parsed by isReference and stored in this struct,
// keyed by their lowercased id in Parser.refs.
type reference struct {
	link  []byte // destination URL (angle brackets stripped)
	title []byte // optional title text; empty slice if absent
}
312
// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the parser struct); passing a nil parser checks without storing.
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(parser *Parser, data []byte) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		// treat "\r\n" as a single newline
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset := i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd := i
	// NOTE(review): i was already advanced past a leading '<' above, so
	// linkOffset points at the first byte AFTER it; this condition can only
	// be true when the link starts with "<<". It looks like the intent was
	// to strip the surrounding angle brackets — verify against the variant
	// used in later versions of this parser.
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return 0
	}

	// compute end-of-line; lineEnd == 0 later means "no valid line end found"
	lineEnd := 0
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	titleOffset, titleEnd := 0, 0
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		// NOTE(review): this checks for "\n\r" (LF then CR), not the usual
		// "\r\n" — confirm whether that ordering is intentional.
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back past trailing whitespace to find the closing delimiter
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}
	if lineEnd == 0 { // garbage after the link
		return 0
	}

	// a valid ref has been found
	if parser == nil {
		// dry run: report the length without storing anything
		return lineEnd
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))
	parser.refs[id] = &reference{
		link:  data[linkOffset:linkEnd],
		title: data[titleOffset:titleEnd],
	}

	return lineEnd
}
447
448
//
//
// Miscellaneous helper functions
//
//
454
455
// Test if a character is an ASCII punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	const punct = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
	return bytes.IndexByte([]byte(punct), c) >= 0
}
466
// Test if a character is an ASCII whitespace character.
func isspace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r', '\f', '\v':
		return true
	default:
		return false
	}
}
471
// Test if a character is an ASCII letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'z':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	}
	return false
}
477
// Replace tab characters with spaces, aligning to the next tabSize column.
// Note: despite earlier documentation, this function does NOT append a
// trailing newline — the caller (firstPass) writes it after each line.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++ // still in the run of leading tabs
			} else {
				// a tab after non-tab content: column positions matter
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line:
	// each leading tab expands to exactly tabSize spaces
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		// copy the run of non-tab bytes, counting columns in runes
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		// emit at least one space, then pad to the next tab stop
		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		// skip over the tab itself
		i++
	}
534}