grayfriday @ 9328516bb9f32ab57a3a8d2d97dbf3e60bbfbed5

blackfriday fork with a few changes

markdown.go

//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)

const VERSION = "1.1"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS          = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                                 // render tables
	EXTENSION_FENCED_CODE                            // render fenced code blocks
	EXTENSION_AUTOLINK                               // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                          // strikethrough text using ~~text~~
	EXTENSION_LAX_HTML_BLOCKS                        // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                          // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK                        // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                         // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES                              // Pandoc-style footnotes
	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK             // no need to insert an empty line to start a block (code, quote, ordered list, unordered list)
	EXTENSION_HEADER_IDS                             // specify header IDs with {#id}
	EXTENSION_TITLEBLOCK                             // title block, a la Pandoc
)

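// Illustrative sketch (not from the original source): the extension constants
// above are bit flags, so callers select several by ORing them together and the
// parser tests for an individual one with a bitwise AND, as Markdown() does
// below. The function name here is hypothetical.
func exampleCombineExtensions() bool {
	extensions := EXTENSION_TABLES | EXTENSION_FENCED_CODE | EXTENSION_AUTOLINK
	return extensions&EXTENSION_FENCED_CODE != 0 // true: fenced code blocks are enabled
}
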
// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)

// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,

	// HTML5
	"video":      true,
	"aside":      true,
	"canvas":     true,
	"figure":     true,
	"footer":     true,
	"header":     true,
	"hgroup":     true,
	"output":     true,
	"article":    true,
	"section":    true,
	"progress":   true,
	"figcaption": true,
}

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided.
type Renderer interface {
	// block-level callbacks
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int, id string)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)
	TitleBlock(out *bytes.Buffer, text []byte)

	// Span-level callbacks
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)

	GetFlags() int
}

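// Illustrative sketch (not part of the original file): a renderer can be
// customized by embedding an existing Renderer and overriding just the
// callbacks of interest. The type and behavior below are hypothetical; only
// the Renderer interface and the CodeSpan signature come from this file.
type shoutingCodeRenderer struct {
	Renderer // e.g. the value returned by HtmlRenderer(...)
}

// CodeSpan upper-cases inline code before delegating to the embedded renderer.
func (r shoutingCodeRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
	r.Renderer.CodeSpan(out, bytes.ToUpper(text))
}
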
// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int

// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	r              Renderer
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          int
	nesting        int
	maxNesting     int
	insideLink     bool

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}

//
//
// Public interface
//
//

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := HTML_USE_XHTML
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0

	return Markdown(input, renderer, extensions)
}

// MarkdownCommon is a convenience function for simple rendering.
// It calls Markdown with the most useful extensions enabled, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
//
// * Custom Header IDs
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := 0
	htmlFlags |= HTML_USE_XHTML
	htmlFlags |= HTML_USE_SMARTYPANTS
	htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
	htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
	htmlFlags |= HTML_SANITIZE_OUTPUT
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0
	extensions |= EXTENSION_NO_INTRA_EMPHASIS
	extensions |= EXTENSION_TABLES
	extensions |= EXTENSION_FENCED_CODE
	extensions |= EXTENSION_AUTOLINK
	extensions |= EXTENSION_STRIKETHROUGH
	extensions |= EXTENSION_SPACE_HEADERS
	extensions |= EXTENSION_HEADER_IDS

	return Markdown(input, renderer, extensions)
}


// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the render structure
	p := new(parser)
	p.r = renderer
	p.flags = extensions
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&EXTENSION_STRIKETHROUGH != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity

	if extensions&EXTENSION_AUTOLINK != 0 {
		p.inlineCallback[':'] = autoLink
	}

	if extensions&EXTENSION_FOOTNOTES != 0 {
		p.notes = make([]*reference, 0)
	}

	first := firstPass(p, input)
	second := secondPass(p, first)

	if renderer.GetFlags()&HTML_SANITIZE_OUTPUT != 0 {
		second = sanitizeHtmlSafe(second)
	}

	return second
}

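// Illustrative sketch (not part of the original file): a typical end-to-end
// call in the style of MarkdownCommon above: build an HTML renderer, OR
// together the desired extension flags, and hand both to Markdown. The
// function name is hypothetical; HtmlRenderer, HTML_USE_XHTML, and the
// extension constants are defined in this package.
func exampleRenderWithExtensions(input []byte) []byte {
	renderer := HtmlRenderer(HTML_USE_XHTML, "", "")
	extensions := EXTENSION_NO_INTRA_EMPHASIS |
		EXTENSION_TABLES |
		EXTENSION_FENCED_CODE
	return Markdown(input, renderer, extensions)
}
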
// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
// - add missing newlines before fenced code blocks
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	lastLineWasBlank := false
	lastFencedCodeBlockEnd := 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			if p.flags&EXTENSION_FENCED_CODE != 0 {
				// if a fenced code block starts here and the previous line was
				// not blank, a blank line needs to be injected before the block
				if beg >= lastFencedCodeBlockEnd {
					// tmp var so we don't modify beyond bounds of `input`
					var tmp = make([]byte, len(input[beg:]), len(input[beg:])+1)
					copy(tmp, input[beg:])
					if i := p.fencedCode(&out, append(tmp, '\n'), false); i > 0 {
						if !lastLineWasBlank {
							out.WriteByte('\n') // need to inject additional linebreak
						}
						lastFencedCodeBlockEnd = beg + i
					}
				}
				lastLineWasBlank = end == beg
			}

			// add the line body if present
			if end > beg {
				if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
					out.Write(input[beg:end])
				} else {
					expandTabs(&out, input[beg:end], tabSize)
				}
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}

// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, func() bool {
			flags := LIST_ITEM_BEGINNING_OF_LIST
			for _, ref := range p.notes {
				var buf bytes.Buffer
				if ref.hasBlock {
					flags |= LIST_ITEM_CONTAINS_BLOCK
					p.block(&buf, ref.title)
				} else {
					p.inline(&buf, ref.title)
				}
				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
			}

			return true
		})
	}

	p.r.DocumentFooter(&output)

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
//    This sentence needs a bit of further explanation.[^note]
//
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
//    Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.

// References are parsed and stored in this struct.
type reference struct {
	link     []byte
	title    []byte
	noteId   int // 0 if not a footnote ref
	hasBlock bool
}

// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	if p.flags&EXTENSION_FOOTNOTES != 0 {
		if i < len(data) && data[i] == '^' {
			// we can set it to anything here because the proper noteIds will
			// be assigned later during the second pass. It just has to be != 0
			noteId = 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
		hasBlock              bool
	)

	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId:   noteId,
		hasBlock: hasBlock,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have links
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))

	p.refs[id] = ref

	return lineEnd
}

func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd = i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}

// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	if i == 0 || len(data) == 0 {
		return
	}

	// skip leading whitespace on first line
	for i < len(data) && data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter.
func isletter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || isletter(c)
}

// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// The line passed in excludes the trailing newline; the caller writes it.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}

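// Worked example (hypothetical helper, not in the original file): with the
// default tab size of 4, "a\tbc\td" expands to "a   bc  d". Each tab pads to
// the next multiple of tabSize columns, counting runes rather than bytes.
func exampleExpandTabs() string {
	var buf bytes.Buffer
	expandTabs(&buf, []byte("a\tbc\td"), TAB_SIZE_DEFAULT)
	return buf.String() // "a   bc  d"
}
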
// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}

// Create a url-safe slug for fragments
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	out := make([]byte, 0, len(in))
	sym := false

	for _, ch := range in {
		if isalnum(ch) {
			sym = false
			out = append(out, ch)
		} else if sym {
			continue
		} else {
			out = append(out, '-')
			sym = true
		}
	}
	var a, b int
	var ch byte
	for a, ch = range out {
		if ch != '-' {
			break
		}
	}
	for b = len(out) - 1; b > 0; b-- {
		if out[b] != '-' {
			break
		}
	}
	return out[a : b+1]
}
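
// Illustrative sketch (hypothetical helper, not in the original file): slugify
// collapses each run of non-alphanumeric bytes into a single '-' and trims
// leading and trailing dashes, so a header like "Hello, World!" yields the
// fragment "Hello-World".
func exampleSlugify() string {
	return string(slugify([]byte("Hello, World!"))) // "Hello-World"
}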