grayfriday @ 7ad5f9c1197c54c0be24fb0b0b1d75eb7ef19d89

blackfriday fork with a few changes

markdown.go

//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)

const VERSION = "1.1"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS          = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                                 // render tables
	EXTENSION_FENCED_CODE                            // render fenced code blocks
	EXTENSION_AUTOLINK                               // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                          // strikethrough text using ~~text~~
	EXTENSION_LAX_HTML_BLOCKS                        // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                          // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK                        // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                         // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES                              // Pandoc-style footnotes
	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK             // no need to insert an empty line to start a block (code, quote, ordered list, unordered list)
	EXTENSION_HEADER_IDS                             // specify header IDs with {#id}
)
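
// For example (a sketch, not part of the package itself), multiple extensions
// are combined with bitwise OR before being passed to Markdown:
//
//	extensions := EXTENSION_TABLES | EXTENSION_FENCED_CODE | EXTENSION_AUTOLINK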

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)

// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,

	// HTML5
	"video":      true,
	"aside":      true,
	"canvas":     true,
	"figure":     true,
	"footer":     true,
	"header":     true,
	"hgroup":     true,
	"output":     true,
	"article":    true,
	"section":    true,
	"progress":   true,
	"figcaption": true,
}

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided.
type Renderer interface {
	// Block-level callbacks
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int, id string)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)

	// Span-level callbacks
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)

	GetFlags() int
}
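
// As an illustration of the callback contract above (a sketch, not the
// upstream implementation; myRenderer is a hypothetical type), a Paragraph
// method would typically remember the buffer length, emit its opening tag,
// and roll back if the callback fails:
//
//	func (r *myRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
//		marker := out.Len() // remember where this element started
//		out.WriteString("<p>")
//		if !text() { // callback failed: discard everything we wrote
//			out.Truncate(marker)
//			return
//		}
//		out.WriteString("</p>\n")
//	}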

// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int

// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	r              Renderer
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          int
	nesting        int
	maxNesting     int
	insideLink     bool

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}

//
//
// Public interface
//
//

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := HTML_USE_XHTML
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0

	return Markdown(input, renderer, extensions)
}

// MarkdownCommon is a convenience function for simple rendering.
// It calls Markdown with the most useful extensions enabled, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
//
// * Custom Header IDs
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := 0
	htmlFlags |= HTML_USE_XHTML
	htmlFlags |= HTML_USE_SMARTYPANTS
	htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
	htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
	htmlFlags |= HTML_SANITIZE_OUTPUT
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0
	extensions |= EXTENSION_NO_INTRA_EMPHASIS
	extensions |= EXTENSION_TABLES
	extensions |= EXTENSION_FENCED_CODE
	extensions |= EXTENSION_AUTOLINK
	extensions |= EXTENSION_STRIKETHROUGH
	extensions |= EXTENSION_SPACE_HEADERS
	extensions |= EXTENSION_HEADER_IDS

	return Markdown(input, renderer, extensions)
}
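
// A minimal usage sketch (hypothetical input, not from the upstream docs):
// the convenience wrappers take raw markdown bytes and return rendered HTML.
//
//	input := []byte("Hello, *markdown*!\n")
//	html := MarkdownCommon(input)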

// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the render structure
	p := new(parser)
	p.r = renderer
	p.flags = extensions
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&EXTENSION_STRIKETHROUGH != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity

	if extensions&EXTENSION_AUTOLINK != 0 {
		p.inlineCallback[':'] = autoLink
	}

	if extensions&EXTENSION_FOOTNOTES != 0 {
		p.notes = make([]*reference, 0)
	}

	first := firstPass(p, input)
	second := secondPass(p, first)

	if renderer.GetFlags()&HTML_SANITIZE_OUTPUT != 0 {
		second = sanitizeHtml(second)
	}

	return second
}
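
// For non-default configurations, a hedged sketch of driving Markdown
// directly with an explicit renderer and extension set (only names defined
// in this package are used; the exact combination is illustrative):
//
//	renderer := HtmlRenderer(HTML_USE_XHTML, "", "")
//	extensions := EXTENSION_FENCED_CODE | EXTENSION_TABLES | EXTENSION_FOOTNOTES
//	output := Markdown(input, renderer, extensions)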

// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			// add the line body if present
			if end > beg {
				expandTabs(&out, input[beg:end], tabSize)
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}

// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, func() bool {
			flags := LIST_ITEM_BEGINNING_OF_LIST
			for _, ref := range p.notes {
				var buf bytes.Buffer
				if ref.hasBlock {
					flags |= LIST_ITEM_CONTAINS_BLOCK
					p.block(&buf, ref.title)
				} else {
					p.inline(&buf, ref.title)
				}
				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
			}

			return true
		})
	}

	p.r.DocumentFooter(&output)

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
//    This sentence needs a bit of further explanation.[^note]
//
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
//    Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.

// References are parsed and stored in this struct.
type reference struct {
	link     []byte
	title    []byte
	noteId   int // 0 if not a footnote ref
	hasBlock bool
}

// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	if p.flags&EXTENSION_FOOTNOTES != 0 {
		if data[i] == '^' {
			// we can set it to anything here because the proper noteIds will
			// be assigned later during the second pass. It just has to be != 0
			noteId = 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
		hasBlock              bool
	)

	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId:   noteId,
		hasBlock: hasBlock,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have links
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))

	p.refs[id] = ref

	return lineEnd
}

func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd = i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}

// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	if i == 0 || len(data) == 0 {
		return
	}

	// skip leading whitespace on first line
	for i < len(data) && data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter.
func isletter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || isletter(c)
}

// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// The trailing newline is added by the caller (see firstPass).
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}
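
// A worked sketch (not from the upstream tests), assuming the default tab
// size of 4: the tab in "ab\tc" advances the column from 2 to the next
// multiple of 4, so two spaces are inserted.
//
//	var buf bytes.Buffer
//	expandTabs(&buf, []byte("ab\tc"), TAB_SIZE_DEFAULT)
//	// buf now holds "ab  c"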

// Find if a line counts as indented or not.
// Returns the number of characters the indent takes up (0 = not indented).
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}

// Create a URL-safe slug for fragments.
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	out := make([]byte, 0, len(in))
	sym := false

	for _, ch := range in {
		if isalnum(ch) {
			sym = false
			out = append(out, ch)
		} else if sym {
			continue
		} else {
			out = append(out, '-')
			sym = true
		}
	}
	var a, b int
	var ch byte
	for a, ch = range out {
		if ch != '-' {
			break
		}
	}
	for b = len(out) - 1; b > 0; b-- {
		if out[b] != '-' {
			break
		}
	}
	return out[a : b+1]
}
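
// A quick illustration (hypothetical inputs, not from the upstream tests):
// runs of non-alphanumeric bytes collapse to a single '-', leading and
// trailing dashes are trimmed, and case is preserved.
//
//	slugify([]byte("Section 1.2: Overview")) // -> "Section-1-2-Overview"
//	slugify([]byte("  Hello, world!  "))     // -> "Hello-world"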