// markdown.go
1//
2// Blackfriday Markdown Processor
3// Available at http://github.com/russross/blackfriday
4//
5// Copyright © 2011 Russ Ross <russ@russross.com>.
6// Distributed under the Simplified BSD License.
7// See README.md for details.
8//
9
10//
11//
12// Markdown parsing and processing
13//
14//
15
16// Blackfriday markdown processor.
17//
18// Translates plain text with simple formatting rules into HTML or LaTeX.
19package blackfriday
20
21import (
22 "bytes"
23 "unicode/utf8"
24)
25
// VERSION is the version string of this blackfriday release.
const VERSION = "1.1"
27
// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES // render tables
	EXTENSION_FENCED_CODE // render fenced code blocks
	EXTENSION_AUTOLINK // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH // strikethrough text using ~~text~~
	EXTENSION_LAX_HTML_BLOCKS // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES // Pandoc-style footnotes
	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK // no empty line needed before a code, quote, ordered-list, or unordered-list block
	EXTENSION_HEADER_IDS // specify header IDs with {#id}
)
44
// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota // an explicitly-written link
	LINK_TYPE_NORMAL // an autodetected URL
	LINK_TYPE_EMAIL // an autodetected email address
)
53
// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota // numbered rather than bulleted list
	LIST_ITEM_CONTAINS_BLOCK // item body holds block-level content
	LIST_ITEM_BEGINNING_OF_LIST // first item of the list
	LIST_ITEM_END_OF_LIST // last item of the list
)
63
// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	// center is deliberately the combination of left and right
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)
72
// The size of a tab stop, in columns. The default is used unless
// EXTENSION_TAB_SIZE_EIGHT is enabled (see firstPass).
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT = 8
)
78
// blockTags is the set of tag names recognized as HTML block elements
// (HTML4 and HTML5 alike), kept in alphabetical order. Any of these can
// be embedded in markdown text without special escaping.
var blockTags = map[string]bool{
	"article":    true,
	"aside":      true,
	"blockquote": true,
	"canvas":     true,
	"del":        true,
	"div":        true,
	"dl":         true,
	"fieldset":   true,
	"figcaption": true,
	"figure":     true,
	"footer":     true,
	"form":       true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"header":     true,
	"hgroup":     true,
	"iframe":     true,
	"ins":        true,
	"math":       true,
	"noscript":   true,
	"ol":         true,
	"output":     true,
	"p":          true,
	"pre":        true,
	"progress":   true,
	"script":     true,
	"section":    true,
	"table":      true,
	"ul":         true,
	"video":      true,
}
119
// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided.
type Renderer interface {
	// Block-level callbacks.
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int, id string)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)

	// Span-level callbacks.
	AutoLink(out *bytes.Buffer, link []byte, kind int) // kind is a LINK_TYPE_* value
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks.
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer, emitted once per document (see secondPass).
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)

	// GetFlags returns the renderer's HTML_* flag bitfield.
	GetFlags() int
}
172
// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
// The trigger character sits at data[offset]; the return value is the
// number of bytes consumed — presumably 0 when the trigger does not
// actually start a span (confirm against the registered parsers).
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int
176
// parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	r              Renderer              // output renderer
	refs           map[string]*reference // link/footnote definitions, keyed by lowercased id (see isReference)
	inlineCallback [256]inlineParser     // inline parser per trigger byte
	flags          int                   // EXTENSION_* bitfield
	nesting        int                   // current nesting depth; must be 0 at end of render (secondPass panics otherwise)
	maxNesting     int                   // nesting limit; set to 16 by Markdown
	insideLink     bool                  // presumably suppresses link parsing inside link text — confirm against inline.go

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}
193
194//
195//
196// Public interface
197//
198//
199
200// MarkdownBasic is a convenience function for simple rendering.
201// It processes markdown input with no extensions enabled.
202func MarkdownBasic(input []byte) []byte {
203 // set up the HTML renderer
204 htmlFlags := HTML_USE_XHTML
205 renderer := HtmlRenderer(htmlFlags, "", "")
206
207 // set up the parser
208 extensions := 0
209
210 return Markdown(input, renderer, extensions)
211}
212
213// Call Markdown with most useful extensions enabled
214// MarkdownCommon is a convenience function for simple rendering.
215// It processes markdown input with common extensions enabled, including:
216//
217// * Smartypants processing with smart fractions and LaTeX dashes
218//
219// * Intra-word emphasis suppression
220//
221// * Tables
222//
223// * Fenced code blocks
224//
225// * Autolinking
226//
227// * Strikethrough support
228//
229// * Strict header parsing
230//
231// * Custom Header IDs
232func MarkdownCommon(input []byte) []byte {
233 // set up the HTML renderer
234 htmlFlags := 0
235 htmlFlags |= HTML_USE_XHTML
236 htmlFlags |= HTML_USE_SMARTYPANTS
237 htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
238 htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
239 htmlFlags |= HTML_SANITIZE_OUTPUT
240 renderer := HtmlRenderer(htmlFlags, "", "")
241
242 // set up the parser
243 extensions := 0
244 extensions |= EXTENSION_NO_INTRA_EMPHASIS
245 extensions |= EXTENSION_TABLES
246 extensions |= EXTENSION_FENCED_CODE
247 extensions |= EXTENSION_AUTOLINK
248 extensions |= EXTENSION_STRIKETHROUGH
249 extensions |= EXTENSION_SPACE_HEADERS
250 extensions |= EXTENSION_HEADER_IDS
251
252 return Markdown(input, renderer, extensions)
253}
254
255// Markdown is the main rendering function.
256// It parses and renders a block of markdown-encoded text.
257// The supplied Renderer is used to format the output, and extensions dictates
258// which non-standard extensions are enabled.
259//
260// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
261// LatexRenderer, respectively.
262func Markdown(input []byte, renderer Renderer, extensions int) []byte {
263 // no point in parsing if we can't render
264 if renderer == nil {
265 return nil
266 }
267
268 // fill in the render structure
269 p := new(parser)
270 p.r = renderer
271 p.flags = extensions
272 p.refs = make(map[string]*reference)
273 p.maxNesting = 16
274 p.insideLink = false
275
276 // register inline parsers
277 p.inlineCallback['*'] = emphasis
278 p.inlineCallback['_'] = emphasis
279 if extensions&EXTENSION_STRIKETHROUGH != 0 {
280 p.inlineCallback['~'] = emphasis
281 }
282 p.inlineCallback['`'] = codeSpan
283 p.inlineCallback['\n'] = lineBreak
284 p.inlineCallback['['] = link
285 p.inlineCallback['<'] = leftAngle
286 p.inlineCallback['\\'] = escape
287 p.inlineCallback['&'] = entity
288
289 if extensions&EXTENSION_AUTOLINK != 0 {
290 p.inlineCallback[':'] = autoLink
291 }
292
293 if extensions&EXTENSION_FOOTNOTES != 0 {
294 p.notes = make([]*reference, 0)
295 }
296
297 first := firstPass(p, input)
298 second := secondPass(p, first)
299
300 if renderer.GetFlags()&HTML_SANITIZE_OUTPUT != 0 {
301 second = sanitizeHtml(second)
302 }
303
304 return second
305}
306
// firstPass normalizes the raw input before block parsing:
//   - extracts link/footnote reference definitions into p.refs
//   - expands tabs
//   - normalizes newlines (CR and CRLF become LF)
//   - copies everything else
//   - adds missing blank lines before fenced code blocks
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	lastLineWasBlank := false
	lastFencedCodeBlockEnd := 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			// a reference definition: record it and omit it from the output
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			if p.flags&EXTENSION_FENCED_CODE != 0 {
				// when last line was none blank and a fenced code block comes after
				if beg >= lastFencedCodeBlockEnd {
					// probe for a fenced code block starting here; the final
					// 'false' presumably means scan-only without rendering —
					// confirm against fencedCode.
					// NOTE(review): append may write a '\n' into input's
					// backing array when capacity allows, mutating the
					// caller's buffer — confirm callers tolerate this.
					if i := p.fencedCode(&out, append(input[beg:], '\n'), false); i > 0 {
						if !lastLineWasBlank {
							out.WriteByte('\n') // need to inject additional linebreak
						}
						lastFencedCodeBlockEnd = beg + i
					}
				}
				lastLineWasBlank = end == beg
			}

			// add the line body if present
			if end > beg {
				expandTabs(&out, input[beg:end], tabSize)
			}
			out.WriteByte('\n')

			// swallow the line ending: CR, LF, or CRLF
			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input still yields a single newline
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}
368
// secondPass performs the actual rendering: document header, body blocks,
// gathered footnotes (when enabled), and document footer, in that order.
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	// render footnote definitions collected during parsing as a list
	// at the end of the document
	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, func() bool {
			flags := LIST_ITEM_BEGINNING_OF_LIST
			for _, ref := range p.notes {
				var buf bytes.Buffer
				if ref.hasBlock {
					flags |= LIST_ITEM_CONTAINS_BLOCK
					p.block(&buf, ref.title) // ref.title holds the footnote body text
				} else {
					p.inline(&buf, ref.title)
				}
				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
				// only the first item carries BEGINNING_OF_LIST;
				// CONTAINS_BLOCK is re-set per item above as needed
				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
			}

			return true
		})
	}

	p.r.DocumentFooter(&output)

	// block/inline nesting bookkeeping must be balanced by now
	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}
403
404//
405// Link references
406//
407// This section implements support for references that (usually) appear
408// as footnotes in a document, and can be referenced anywhere in the document.
409// The basic format is:
410//
411// [1]: http://www.google.com/ "Google"
412// [2]: http://www.github.com/ "Github"
413//
414// Anywhere in the document, the reference can be linked by referring to its
415// label, i.e., 1 and 2 in this example, as in:
416//
417// This library is hosted on [Github][2], a git hosting site.
418//
419// Actual footnotes as specified in Pandoc and supported by some other Markdown
420// libraries such as php-markdown are also taken care of. They look like this:
421//
422// This sentence needs a bit of further explanation.[^note]
423//
424// [^note]: This is the explanation.
425//
426// Footnotes should be placed at the end of the document in an ordered list.
427// Inline footnotes such as:
428//
429// Inline footnotes^[Not supported.] also exist.
430//
431// are not yet supported.
432
// References are parsed and stored in this struct.
type reference struct {
	link     []byte // link target; for footnotes, reused to hold the id (see isReference)
	title    []byte // link title; for footnotes, the contained text
	noteId   int    // 0 if not a footnote ref
	hasBlock bool   // footnote body contains block-level content
}
440
// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	// a leading '^' inside the brackets marks a footnote definition
	if p.flags&EXTENSION_FOOTNOTES != 0 {
		if data[i] == '^' {
			// we can set it to anything here because the proper noteIds will
			// be assigned later during the second pass. It just has to be != 0
			noteId = 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		// swallow the LF of a CRLF pair
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
		hasBlock              bool
	)

	// footnotes gather an indented block of body text; plain references
	// scan a single url plus an optional title
	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId:   noteId,
		hasBlock: hasBlock,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have links
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))

	p.refs[id] = ref

	return lineEnd
}
544
545func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
546 // link: whitespace-free sequence, optionally between angle brackets
547 if data[i] == '<' {
548 i++
549 }
550 linkOffset = i
551 for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
552 i++
553 }
554 linkEnd = i
555 if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
556 linkOffset++
557 linkEnd--
558 }
559
560 // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
561 for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
562 i++
563 }
564 if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
565 return
566 }
567
568 // compute end-of-line
569 if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
570 lineEnd = i
571 }
572 if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
573 lineEnd++
574 }
575
576 // optional (space|tab)* spacer after a newline
577 if lineEnd > 0 {
578 i = lineEnd + 1
579 for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
580 i++
581 }
582 }
583
584 // optional title: any non-newline sequence enclosed in '"() alone on its line
585 if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
586 i++
587 titleOffset = i
588
589 // look for EOL
590 for i < len(data) && data[i] != '\n' && data[i] != '\r' {
591 i++
592 }
593 if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
594 titleEnd = i + 1
595 } else {
596 titleEnd = i
597 }
598
599 // step back
600 i--
601 for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
602 i--
603 }
604 if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
605 lineEnd = titleEnd
606 titleEnd = i
607 }
608 }
609
610 return
611}
612
// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	// i == 0 cannot be a valid footnote start (the "[^id]:" prefix
	// precedes it); this guard also keeps data[blockEnd-1] below safe
	if i == 0 || len(data) == 0 {
		return
	}

	// skip leading whitespace on first line
	for i < len(data) && data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	// make sure the gathered contents end with a newline
	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}
692
693//
694//
695// Miscellaneous helper functions
696//
697//
698
// ispunct reports whether c is an ASCII punctuation symbol.
// The character set is taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	const punct = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
	return bytes.IndexByte([]byte(punct), c) >= 0
}
709
// isspace reports whether c is an ASCII whitespace character.
func isspace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r', '\f', '\v':
		return true
	}
	return false
}
714
// isletter reports whether c is an ASCII letter.
func isletter(c byte) bool {
	// folding bit 5 maps 'A'..'Z' onto 'a'..'z'; no other byte lands
	// in that range, so a single comparison covers both cases
	lower := c | 0x20
	return lower >= 'a' && lower <= 'z'
}
719
// isalnum reports whether c is an ASCII letter or digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'z':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	}
	return false
}
725
// expandTabs writes line to out with tab characters replaced by spaces,
// aligning each tab to the next tabSize column (columns counted in runes).
// No trailing newline is added; the caller (firstPass) appends it.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				// a tab after non-tab content: column positions matter
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		// copy the run of non-tab bytes, counting runes for the column
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		// pad with spaces up to the next tab stop
		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}
783
// isIndented reports how many bytes of indentation prefix data:
// 1 for a leading tab, indentSize for indentSize leading spaces,
// and 0 when the line does not count as indented.
func isIndented(data []byte, indentSize int) int {
	switch {
	case len(data) == 0:
		return 0
	case data[0] == '\t':
		return 1
	case len(data) < indentSize:
		return 0
	}
	for _, ch := range data[:indentSize] {
		if ch != ' ' {
			return 0
		}
	}
	return indentSize
}
803
// slugify creates a url-safe slug for fragments: runs of non-alphanumeric
// bytes collapse into a single '-', and leading/trailing dashes are
// trimmed (a slug consisting of a single dash is returned as-is).
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	slug := make([]byte, 0, len(in))
	lastWasDash := false
	for _, ch := range in {
		alnum := (ch >= '0' && ch <= '9') ||
			(ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z')
		switch {
		case alnum:
			slug = append(slug, ch)
			lastWasDash = false
		case lastWasDash:
			// collapse consecutive symbols into one dash
		default:
			slug = append(slug, '-')
			lastWasDash = true
		}
	}
	// trim leading dashes, but always keep at least one byte
	start := 0
	for start < len(slug)-1 && slug[start] == '-' {
		start++
	}
	// trim trailing dashes, but never back past index 0
	end := len(slug) - 1
	for end > 0 && slug[end] == '-' {
		end--
	}
	return slug[start : end+1]
}