grayfriday @ cb288d6b5d02f1a7be20cf6efbdb63d4164b8e67

blackfriday fork with a few changes

markdown.go

//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)

const VERSION = "1.1"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS          = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                                 // render tables
	EXTENSION_FENCED_CODE                            // render fenced code blocks
	EXTENSION_AUTOLINK                               // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                          // strikethrough text using ~~text~~
	EXTENSION_LAX_HTML_BLOCKS                        // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                          // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK                        // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                         // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES                              // Pandoc-style footnotes
	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK             // no empty line needed before a block (code, quote, ordered list, unordered list)
)
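
// For illustration, a caller enables several extensions at once by OR-ing the
// flags together before handing them to Markdown (a hypothetical selection):
//
//	extensions := EXTENSION_TABLES | EXTENSION_FENCED_CODE | EXTENSION_AUTOLINK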

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)

// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,

	// HTML5
	"video":      true,
	"aside":      true,
	"canvas":     true,
	"figure":     true,
	"footer":     true,
	"header":     true,
	"hgroup":     true,
	"output":     true,
	"article":    true,
	"section":    true,
	"progress":   true,
	"figcaption": true,
}
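
// For example (illustrative input, not from the original sources), a raw
// block-level tag from the table above passes through untouched:
//
//	Some regular markdown text.
//
//	<div class="note">
//	  This div is emitted verbatim as an HTML block.
//	</div>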

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided.
type Renderer interface {
	// block-level callbacks
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)

	// Span-level callbacks
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)

	GetFlags() int
}
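
// wrappedRenderer is an illustrative sketch of how a new output format can
// build on an existing one: embed another Renderer and override only the
// callbacks that need custom behavior. The type name and the extra <div>
// wrapper below are hypothetical and unused elsewhere in this package.
type wrappedRenderer struct {
	Renderer // e.g. the value returned by HtmlRenderer
}

// BlockCode wraps each rendered code block in an extra container element
// before delegating to the embedded renderer.
func (r *wrappedRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
	out.WriteString("<div class=\"snippet\">\n")
	r.Renderer.BlockCode(out, text, lang)
	out.WriteString("</div>\n")
}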

// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int

// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	r              Renderer
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          int
	nesting        int
	maxNesting     int
	insideLink     bool

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}

//
//
// Public interface
//
//

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := HTML_USE_XHTML
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0

	return Markdown(input, renderer, extensions)
}
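
// For example (illustrative input and output):
//
//	html := MarkdownBasic([]byte("Hello, *world*!"))
//	// html now holds XHTML such as "<p>Hello, <em>world</em>!</p>"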

// MarkdownCommon is a convenience function for simple rendering.
// It calls Markdown with the most useful extensions enabled, processing
// the input with common extensions including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := 0
	htmlFlags |= HTML_USE_XHTML
	htmlFlags |= HTML_USE_SMARTYPANTS
	htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
	htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
	htmlFlags |= HTML_SANITIZE_OUTPUT
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0
	extensions |= EXTENSION_NO_INTRA_EMPHASIS
	extensions |= EXTENSION_TABLES
	extensions |= EXTENSION_FENCED_CODE
	extensions |= EXTENSION_AUTOLINK
	extensions |= EXTENSION_STRIKETHROUGH
	extensions |= EXTENSION_SPACE_HEADERS

	return Markdown(input, renderer, extensions)
}

// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the render structure
	p := new(parser)
	p.r = renderer
	p.flags = extensions
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&EXTENSION_STRIKETHROUGH != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity

	if extensions&EXTENSION_AUTOLINK != 0 {
		p.inlineCallback[':'] = autoLink
	}

	if extensions&EXTENSION_FOOTNOTES != 0 {
		p.notes = make([]*reference, 0)
	}

	first := firstPass(p, input)
	second := secondPass(p, first)

	if renderer.GetFlags()&HTML_SANITIZE_OUTPUT != 0 {
		second = sanitizeHtml(second)
	}

	return second
}
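
// renderWithFootnotes is a usage sketch (a hypothetical helper, not used
// elsewhere in this package) showing the same wiring pattern that
// MarkdownBasic and MarkdownCommon follow above: pick a renderer, OR the
// desired extension flags together, and hand both to Markdown.
func renderWithFootnotes(input []byte) []byte {
	renderer := HtmlRenderer(HTML_USE_XHTML, "", "")
	extensions := EXTENSION_FOOTNOTES | EXTENSION_FENCED_CODE | EXTENSION_TABLES
	return Markdown(input, renderer, extensions)
}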

// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
// - add missing newlines before fenced code blocks
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	lastLineWasBlank := false
	lastFencedCodeBlockEnd := 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			if p.flags&EXTENSION_FENCED_CODE != 0 {
				// when the last line was not blank and a fenced code block comes after
				if !lastLineWasBlank && beg >= lastFencedCodeBlockEnd {
					if i := p.fencedCode(&out, append(input[beg:], '\n'), false); i > 0 {
						out.WriteByte('\n') // need to inject additional linebreak
						lastFencedCodeBlockEnd = beg + i
					}
				}
				lastLineWasBlank = end == beg
			}

			// add the line body if present
			if end > beg {
				expandTabs(&out, input[beg:end], tabSize)
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}
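
// For example (illustrative only), with the default tab size of 4 the input
// "a\tb\r\nc" leaves the first pass as "a   b\nc\n": the tab is expanded to
// the next tab stop, the CRLF is collapsed to a single newline, and every
// line gains a trailing newline.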

// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, func() bool {
			flags := LIST_ITEM_BEGINNING_OF_LIST
			for _, ref := range p.notes {
				var buf bytes.Buffer
				if ref.hasBlock {
					flags |= LIST_ITEM_CONTAINS_BLOCK
					p.block(&buf, ref.title)
				} else {
					p.inline(&buf, ref.title)
				}
				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
			}

			return true
		})
	}

	p.r.DocumentFooter(&output)

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
//    This sentence needs a bit of further explanation.[^note]
//
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
//    Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.

// References are parsed and stored in this struct.
type reference struct {
	link     []byte
	title    []byte
	noteId   int // 0 if not a footnote ref
	hasBlock bool
}
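
// As an illustration, the reference definition from the comment above
//
//	[2]: http://www.github.com/ "Github"
//
// is stored in parser.refs under the lowercased key "2", with link set to
// "http://www.github.com/" and title set to "Github".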

// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	if p.flags&EXTENSION_FOOTNOTES != 0 {
		if data[i] == '^' {
			// we can set it to anything here because the proper noteIds will
			// be assigned later during the second pass. It just has to be != 0
			noteId = 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
		hasBlock              bool
	)

	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId:   noteId,
		hasBlock: hasBlock,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have links
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))

	p.refs[id] = ref

	return lineEnd
}

func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd = i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}

// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	if i == 0 || len(data) == 0 {
		return
	}

	// skip leading whitespace on first line
	for i < len(data) && data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}
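
// For example (illustrative input), a footnote definition such as
//
//	[^note]: first paragraph of the note
//
//	    a second, indented paragraph belonging to the same note
//
// is gathered into a single contents buffer with one level of indentation
// stripped from the continuation lines, and hasBlock is set because an
// indented continuation line was found.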

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter.
func isletter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || isletter(c)
}

// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// The caller (firstPass) appends the trailing newline to each expanded line.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}
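
// For example (illustrative only), with tabSize 4 the line "é\tx" expands to
// "é   x": the two-byte rune 'é' counts as a single column, so the tab is
// padded with three spaces to reach the next tab stop.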

// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}

// Create a url-safe slug for fragments
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	out := make([]byte, 0, len(in))
	sym := false

	for _, ch := range in {
		if isalnum(ch) {
			sym = false
			out = append(out, ch)
		} else if sym {
			continue
		} else {
			out = append(out, '-')
			sym = true
		}
	}
	var a, b int
	var ch byte
	for a, ch = range out {
		if ch != '-' {
			break
		}
	}
	for b = len(out) - 1; b > 0; b-- {
		if out[b] != '-' {
			break
		}
	}
	return out[a : b+1]
}
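
// For example, slugify([]byte("Hello, World!")) yields "Hello-World": runs of
// non-alphanumeric characters collapse to a single '-', and any leading or
// trailing dashes are trimmed.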