markdown.go
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)

const VERSION = "1.1"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                        // render tables
	EXTENSION_FENCED_CODE                   // render fenced code blocks
	EXTENSION_AUTOLINK                      // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                 // strikethrough text using ~~text~~
	EXTENSION_LAX_HTML_BLOCKS               // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                 // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK               // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES                     // Pandoc-style footnotes
	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK    // no need for an empty line to start a block (code, quote, ordered list, unordered list)
	EXTENSION_HEADER_IDS                    // specify header IDs with {#id}
	EXTENSION_TITLEBLOCK                    // title block, à la Pandoc
)

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)

// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,

	// HTML5
	"video":      true,
	"aside":      true,
	"canvas":     true,
	"figure":     true,
	"footer":     true,
	"header":     true,
	"hgroup":     true,
	"output":     true,
	"article":    true,
	"section":    true,
	"progress":   true,
	"figcaption": true,
}

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently, Html and Latex implementations are provided.
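//
// A minimal usage sketch with the HTML renderer that ships with this package
// (flags other than HTML_USE_XHTML are omitted here for brevity):
//
//	renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "", "")
//	output := blackfriday.Markdown(input, renderer, 0)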
type Renderer interface {
	// block-level callbacks
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int, id string)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)
	TitleBlock(out *bytes.Buffer, text []byte)

	// Span-level callbacks
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)

	GetFlags() int
}

// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int

// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	r              Renderer
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          int
	nesting        int
	maxNesting     int
	insideLink     bool

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}

//
//
// Public interface
//
//

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
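//
// For example, a minimal sketch (the input text is illustrative only):
//
//	output := blackfriday.MarkdownBasic([]byte("# Title\n\nSome *plain* markdown.\n"))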
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := HTML_USE_XHTML
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0

	return Markdown(input, renderer, extensions)
}

// MarkdownCommon is a convenience function for simple rendering.
// It calls Markdown with the most useful extensions enabled, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
//
// * Custom Header IDs
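//
// For example, a minimal sketch (the input text is illustrative only):
//
//	output := blackfriday.MarkdownCommon([]byte("Some ~~old~~ **new** text.\n"))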
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := 0
	htmlFlags |= HTML_USE_XHTML
	htmlFlags |= HTML_USE_SMARTYPANTS
	htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
	htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0
	extensions |= EXTENSION_NO_INTRA_EMPHASIS
	extensions |= EXTENSION_TABLES
	extensions |= EXTENSION_FENCED_CODE
	extensions |= EXTENSION_AUTOLINK
	extensions |= EXTENSION_STRIKETHROUGH
	extensions |= EXTENSION_SPACE_HEADERS
	extensions |= EXTENSION_HEADER_IDS

	return Markdown(input, renderer, extensions)
}

// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
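//
// A minimal sketch combining a renderer with a custom set of extensions
// (the particular flags chosen here are only an example):
//
//	renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "", "")
//	extensions := blackfriday.EXTENSION_TABLES | blackfriday.EXTENSION_FENCED_CODE
//	output := blackfriday.Markdown(input, renderer, extensions)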
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the render structure
	p := new(parser)
	p.r = renderer
	p.flags = extensions
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&EXTENSION_STRIKETHROUGH != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity

	if extensions&EXTENSION_AUTOLINK != 0 {
		p.inlineCallback[':'] = autoLink
	}

	if extensions&EXTENSION_FOOTNOTES != 0 {
		p.notes = make([]*reference, 0)
	}

	first := firstPass(p, input)
	second := secondPass(p, first)
	return second
}

// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
// - add missing newlines before fenced code blocks
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	lastLineWasBlank := false
	lastFencedCodeBlockEnd := 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			if p.flags&EXTENSION_FENCED_CODE != 0 {
				// when the last line was non-blank and a fenced code block comes after
				if beg >= lastFencedCodeBlockEnd {
					// tmp var so we don't modify beyond bounds of `input`
					var tmp = make([]byte, len(input[beg:]), len(input[beg:])+1)
					copy(tmp, input[beg:])
					if i := p.fencedCode(&out, append(tmp, '\n'), false); i > 0 {
						if !lastLineWasBlank {
							out.WriteByte('\n') // need to inject additional linebreak
						}
						lastFencedCodeBlockEnd = beg + i
					}
				}
				lastLineWasBlank = end == beg
			}

			// add the line body if present
			if end > beg {
				if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
					out.Write(input[beg:end])
				} else {
					expandTabs(&out, input[beg:end], tabSize)
				}
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}

// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, func() bool {
			flags := LIST_ITEM_BEGINNING_OF_LIST
			for _, ref := range p.notes {
				var buf bytes.Buffer
				if ref.hasBlock {
					flags |= LIST_ITEM_CONTAINS_BLOCK
					p.block(&buf, ref.title)
				} else {
					p.inline(&buf, ref.title)
				}
				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
			}

			return true
		})
	}

	p.r.DocumentFooter(&output)

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
//    This sentence needs a bit of further explanation.[^note]
//
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
//    Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.

// References are parsed and stored in this struct.
type reference struct {
	link     []byte
	title    []byte
	noteId   int // 0 if not a footnote ref
	hasBlock bool
}

// Check whether data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the parser struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
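//
// For example (a sketch; the URL and label are illustrative), given input
// whose first line is
//
//	[1]: http://www.google.com/ "Google"
//
// the reference is stored under the lowercased id "1" and a positive offset
// (the end of that line) is returned so the caller can skip past it.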
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	if p.flags&EXTENSION_FOOTNOTES != 0 {
		if data[i] == '^' {
			// we can set it to anything here because the proper noteIds will
			// be assigned later during the second pass. It just has to be != 0
			noteId = 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
		hasBlock              bool
	)

	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId:   noteId,
		hasBlock: hasBlock,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have links
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))

	p.refs[id] = ref

	return lineEnd
}

func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd = i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}

// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
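//
// A sketch of the kind of input handled here (the footnote text is
// illustrative only): the first line follows the footnote label, and
// continuation lines indented by one tab or by indentSize spaces are gathered
// into the same block with that indent stripped:
//
//	[^note]: First line of the footnote.
//	    Continuation line, indented by four spaces.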
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	if i == 0 || len(data) == 0 {
		return
	}

	// skip leading whitespace on first line
	for i < len(data) && data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter.
func isletter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || isletter(c)
}

// Replace tab characters with spaces, aligning to the next tabSize column.
// The trailing newline is added by the caller, not here.
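//
// For example (assuming the default tab size of 4), the input "ab\tc"
// becomes "ab  c": two spaces bring the column count up to the next
// multiple of four before 'c' is written.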
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}

// Find out whether a line counts as indented.
// Returns the number of characters the indent occupies (0 = not indented).
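//
// For example (a sketch using the default indent size of 4):
//
//	isIndented([]byte("    code"), 4) // returns 4
//	isIndented([]byte("\tcode"), 4)   // returns 1 (a single tab counts)
//	isIndented([]byte("  code"), 4)   // returns 0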
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}

// Create a URL-safe slug for fragments.
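//
// For example, slugify([]byte("Hello, World!")) yields "Hello-World":
// runs of non-alphanumeric characters collapse into single hyphens, and
// leading and trailing hyphens are trimmed.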
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	out := make([]byte, 0, len(in))
	sym := false

	for _, ch := range in {
		if isalnum(ch) {
			sym = false
			out = append(out, ch)
		} else if sym {
			continue
		} else {
			out = append(out, '-')
			sym = true
		}
	}
	var a, b int
	var ch byte
	for a, ch = range out {
		if ch != '-' {
			break
		}
	}
	for b = len(out) - 1; b > 0; b-- {
		if out[b] != '-' {
			break
		}
	}
	return out[a : b+1]
839}