markdown.go (view raw)
1//
2// Blackfriday Markdown Processor
3// Available at http://github.com/russross/blackfriday
4//
5// Copyright © 2011 Russ Ross <russ@russross.com>.
6// Distributed under the Simplified BSD License.
7// See README.md for details.
8//
9
10//
11//
12// Markdown parsing and processing
13//
14//
15
16// Blackfriday markdown processor.
17//
18// Translates plain text with simple formatting rules into HTML or LaTeX.
19package blackfriday
20
21import (
22 "bytes"
23 "unicode/utf8"
24)
25
// VERSION is the version string reported by this library.
const VERSION = "1.1"
27
// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                        // render tables
	EXTENSION_FENCED_CODE                   // render fenced code blocks
	EXTENSION_AUTOLINK                      // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                 // strikethrough text using ~~test~~
	EXTENSION_LAX_HTML_BLOCKS               // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                 // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK               // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES                     // Pandoc-style footnotes
	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK    // no need for an empty line to start a (code, quote, ordered list, unordered list) block
	EXTENSION_HEADER_IDS                    // specify header IDs with {#id}
)
44
// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
// NOTE(review): presumably passed as the `kind` argument of the
// Renderer.AutoLink callback — confirm in the inline parser.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)
53
// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
// (See secondPass for how the BEGINNING_OF_LIST and CONTAINS_BLOCK bits
// are set and cleared while rendering footnote items.)
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)
63
// These are the possible flag values for the table cell renderer.
// Alignment flags can combine: center alignment is the OR of left and right.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)
72
// The size of a tab stop, in columns. The default of four is used unless
// EXTENSION_TAB_SIZE_EIGHT is enabled (see firstPass).
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)
78
// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
// Keys are lower-case tag names; presumably the block parser lower-cases
// candidate tags before lookup — confirm in the HTML block parser.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,

	// HTML5
	"video":      true,
	"aside":      true,
	"canvas":     true,
	"figure":     true,
	"footer":     true,
	"header":     true,
	"hgroup":     true,
	"output":     true,
	"article":    true,
	"section":    true,
	"progress":   true,
	"figcaption": true,
}
119
// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided.
type Renderer interface {
	// Block-level callbacks.
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int, id string)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)

	// Span-level callbacks.
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks.
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer, emitted once around the whole document.
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)

	// GetFlags returns the renderer's configured flag bits (used by
	// Markdown, e.g. to test for HTML_SANITIZE_OUTPUT).
	GetFlags() int
}
172
// inlineParser is the callback type for inline parsing. One such function is
// registered (in Markdown) for each character that triggers a response when
// parsing inline data; it returns the number of bytes consumed.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int
176
// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
	r              Renderer              // output renderer
	refs           map[string]*reference // link references, keyed by lower-cased id (see isReference)
	inlineCallback [256]inlineParser     // per-byte dispatch table for inline parsing
	flags          int                   // EXTENSION_* bits supplied to Markdown
	nesting        int                   // current nesting depth; must be zero when rendering finishes
	maxNesting     int                   // nesting limit (set to 16 by Markdown)
	insideLink     bool                  // set while parsing link content — presumably to suppress nested links; confirm in the inline parser

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}
193
194//
195//
196// Public interface
197//
198//
199
200// MarkdownBasic is a convenience function for simple rendering.
201// It processes markdown input with no extensions enabled.
202func MarkdownBasic(input []byte) []byte {
203 // set up the HTML renderer
204 htmlFlags := HTML_USE_XHTML
205 renderer := HtmlRenderer(htmlFlags, "", "")
206
207 // set up the parser
208 extensions := 0
209
210 return Markdown(input, renderer, extensions)
211}
212
213// Call Markdown with most useful extensions enabled
214// MarkdownCommon is a convenience function for simple rendering.
215// It processes markdown input with common extensions enabled, including:
216//
217// * Smartypants processing with smart fractions and LaTeX dashes
218//
219// * Intra-word emphasis suppression
220//
221// * Tables
222//
223// * Fenced code blocks
224//
225// * Autolinking
226//
227// * Strikethrough support
228//
229// * Strict header parsing
230//
231// * Custom Header IDs
232func MarkdownCommon(input []byte) []byte {
233 // set up the HTML renderer
234 htmlFlags := 0
235 htmlFlags |= HTML_USE_XHTML
236 htmlFlags |= HTML_USE_SMARTYPANTS
237 htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
238 htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
239 htmlFlags |= HTML_SANITIZE_OUTPUT
240 renderer := HtmlRenderer(htmlFlags, "", "")
241
242 // set up the parser
243 extensions := 0
244 extensions |= EXTENSION_NO_INTRA_EMPHASIS
245 extensions |= EXTENSION_TABLES
246 extensions |= EXTENSION_FENCED_CODE
247 extensions |= EXTENSION_AUTOLINK
248 extensions |= EXTENSION_STRIKETHROUGH
249 extensions |= EXTENSION_SPACE_HEADERS
250 extensions |= EXTENSION_HEADER_IDS
251
252 return Markdown(input, renderer, extensions)
253}
254
255// Markdown is the main rendering function.
256// It parses and renders a block of markdown-encoded text.
257// The supplied Renderer is used to format the output, and extensions dictates
258// which non-standard extensions are enabled.
259//
260// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
261// LatexRenderer, respectively.
262func Markdown(input []byte, renderer Renderer, extensions int) []byte {
263 // no point in parsing if we can't render
264 if renderer == nil {
265 return nil
266 }
267
268 // fill in the render structure
269 p := new(parser)
270 p.r = renderer
271 p.flags = extensions
272 p.refs = make(map[string]*reference)
273 p.maxNesting = 16
274 p.insideLink = false
275
276 // register inline parsers
277 p.inlineCallback['*'] = emphasis
278 p.inlineCallback['_'] = emphasis
279 if extensions&EXTENSION_STRIKETHROUGH != 0 {
280 p.inlineCallback['~'] = emphasis
281 }
282 p.inlineCallback['`'] = codeSpan
283 p.inlineCallback['\n'] = lineBreak
284 p.inlineCallback['['] = link
285 p.inlineCallback['<'] = leftAngle
286 p.inlineCallback['\\'] = escape
287 p.inlineCallback['&'] = entity
288
289 if extensions&EXTENSION_AUTOLINK != 0 {
290 p.inlineCallback[':'] = autoLink
291 }
292
293 if extensions&EXTENSION_FOOTNOTES != 0 {
294 p.notes = make([]*reference, 0)
295 }
296
297 first := firstPass(p, input)
298 second := secondPass(p, first)
299
300 if renderer.GetFlags()&HTML_SANITIZE_OUTPUT != 0 {
301 second = sanitizeHtmlSafe(second)
302 }
303
304 return second
305}
306
// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
// - add missing newlines before fenced code blocks
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	lastLineWasBlank := false
	lastFencedCodeBlockEnd := 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			// reference definition: recorded inside isReference,
			// skip it without copying anything to the output
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			if p.flags&EXTENSION_FENCED_CODE != 0 {
				// when last line was none blank and a fenced code block comes after
				if beg >= lastFencedCodeBlockEnd {
					// tmp var so we don't modify beyond bounds of `input`
					var tmp = make([]byte, len(input[beg:]), len(input[beg:])+1)
					copy(tmp, input[beg:])
					if i := p.fencedCode(&out, append(tmp, '\n'), false); i > 0 {
						if !lastLineWasBlank {
							out.WriteByte('\n') // need to inject additional linebreak
						}
						lastFencedCodeBlockEnd = beg + i
					}
				}
				lastLineWasBlank = end == beg
			}

			// add the line body if present
			if end > beg {
				if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
					out.Write(input[beg:end])
				} else {
					expandTabs(&out, input[beg:end], tabSize)
				}
			}
			out.WriteByte('\n')

			// consume the line terminator; "\r\n" is swallowed as a pair,
			// so output newlines are normalized to a single '\n'
			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input still yields a single newline
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}
375
// second pass: actual rendering of the normalized input produced by firstPass
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	// render the accumulated footnotes as a list at the end of the document
	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, func() bool {
			flags := LIST_ITEM_BEGINNING_OF_LIST
			for _, ref := range p.notes {
				var buf bytes.Buffer
				if ref.hasBlock {
					flags |= LIST_ITEM_CONTAINS_BLOCK
					p.block(&buf, ref.title)
				} else {
					p.inline(&buf, ref.title)
				}
				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
				// only the first item carries BEGINNING_OF_LIST; CONTAINS_BLOCK
				// is re-evaluated per item
				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
			}

			return true
		})
	}

	p.r.DocumentFooter(&output)

	// the block/inline parsers must leave the nesting counter balanced;
	// anything else indicates a parser bug
	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}
410
411//
412// Link references
413//
414// This section implements support for references that (usually) appear
415// as footnotes in a document, and can be referenced anywhere in the document.
416// The basic format is:
417//
418// [1]: http://www.google.com/ "Google"
419// [2]: http://www.github.com/ "Github"
420//
421// Anywhere in the document, the reference can be linked by referring to its
422// label, i.e., 1 and 2 in this example, as in:
423//
424// This library is hosted on [Github][2], a git hosting site.
425//
426// Actual footnotes as specified in Pandoc and supported by some other Markdown
427// libraries such as php-markdown are also taken care of. They look like this:
428//
429// This sentence needs a bit of further explanation.[^note]
430//
431// [^note]: This is the explanation.
432//
433// Footnotes should be placed at the end of the document in an ordered list.
434// Inline footnotes such as:
435//
436// Inline footnotes^[Not supported.] also exist.
437//
438// are not yet supported.
439
// References are parsed and stored in this struct.
type reference struct {
	link     []byte // link destination; for footnotes, reused to hold the id
	title    []byte // link title; for footnotes, the contained text
	noteId   int    // 0 if not a footnote ref
	hasBlock bool   // footnote body contains block-level content
}
447
448// Check whether or not data starts with a reference link.
449// If so, it is parsed and stored in the list of references
450// (in the render struct).
451// Returns the number of bytes to skip to move past it,
452// or zero if the first line is not a reference.
453func isReference(p *parser, data []byte, tabSize int) int {
454 // up to 3 optional leading spaces
455 if len(data) < 4 {
456 return 0
457 }
458 i := 0
459 for i < 3 && data[i] == ' ' {
460 i++
461 }
462
463 noteId := 0
464
465 // id part: anything but a newline between brackets
466 if data[i] != '[' {
467 return 0
468 }
469 i++
470 if p.flags&EXTENSION_FOOTNOTES != 0 {
471 if data[i] == '^' {
472 // we can set it to anything here because the proper noteIds will
473 // be assigned later during the second pass. It just has to be != 0
474 noteId = 1
475 i++
476 }
477 }
478 idOffset := i
479 for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
480 i++
481 }
482 if i >= len(data) || data[i] != ']' {
483 return 0
484 }
485 idEnd := i
486
487 // spacer: colon (space | tab)* newline? (space | tab)*
488 i++
489 if i >= len(data) || data[i] != ':' {
490 return 0
491 }
492 i++
493 for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
494 i++
495 }
496 if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
497 i++
498 if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
499 i++
500 }
501 }
502 for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
503 i++
504 }
505 if i >= len(data) {
506 return 0
507 }
508
509 var (
510 linkOffset, linkEnd int
511 titleOffset, titleEnd int
512 lineEnd int
513 raw []byte
514 hasBlock bool
515 )
516
517 if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
518 linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
519 lineEnd = linkEnd
520 } else {
521 linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
522 }
523 if lineEnd == 0 {
524 return 0
525 }
526
527 // a valid ref has been found
528
529 ref := &reference{
530 noteId: noteId,
531 hasBlock: hasBlock,
532 }
533
534 if noteId > 0 {
535 // reusing the link field for the id since footnotes don't have links
536 ref.link = data[idOffset:idEnd]
537 // if footnote, it's not really a title, it's the contained text
538 ref.title = raw
539 } else {
540 ref.link = data[linkOffset:linkEnd]
541 ref.title = data[titleOffset:titleEnd]
542 }
543
544 // id matches are case-insensitive
545 id := string(bytes.ToLower(data[idOffset:idEnd]))
546
547 p.refs[id] = ref
548
549 return lineEnd
550}
551
// scanLinkRef scans data from offset i for the destination-and-title portion
// of a reference definition ("[id]: url \"title\""). All results are offsets
// into data; lineEnd == 0 signals that no valid reference line was found.
func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd = i
	// NOTE(review): i was already advanced past a leading '<' above, so
	// data[linkOffset] can only be '<' when the input begins "<<". This looks
	// like it intended to strip the "<url>" brackets but cannot fire for the
	// normal case — confirm against upstream intent before changing.
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		// NOTE(review): this tests for "\n\r" (LF then CR), not the usual
		// "\r\n" — verify whether that ordering is intentional.
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back past trailing whitespace to find the closing quote/paren
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}
619
// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
// i is the offset into data at which scanning starts; indentSize is the
// indent (in columns) that marks continuation lines (see isIndented).
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	if i == 0 || len(data) == 0 {
		return
	}

	// skip leading whitespace on first line
	for i < len(data) && data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	// make sure the gathered contents end with a newline
	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}
699
700//
701//
702// Miscellaneous helper functions
703//
704//
705
// ispunct reports whether c is an ASCII punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	const symbols = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
	return bytes.IndexByte([]byte(symbols), c) >= 0
}
716
// isspace reports whether c is an ASCII whitespace character.
func isspace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r', '\f', '\v':
		return true
	}
	return false
}
721
// isletter reports whether c is an ASCII letter.
func isletter(c byte) bool {
	lower := c | 0x20 // fold ASCII upper case onto lower case
	return lower >= 'a' && lower <= 'z'
}
726
// isalnum reports whether c is an ASCII letter or digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'z':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	}
	return false
}
732
733// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
734// always ends output with a newline
735func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
736 // first, check for common cases: no tabs, or only tabs at beginning of line
737 i, prefix := 0, 0
738 slowcase := false
739 for i = 0; i < len(line); i++ {
740 if line[i] == '\t' {
741 if prefix == i {
742 prefix++
743 } else {
744 slowcase = true
745 break
746 }
747 }
748 }
749
750 // no need to decode runes if all tabs are at the beginning of the line
751 if !slowcase {
752 for i = 0; i < prefix*tabSize; i++ {
753 out.WriteByte(' ')
754 }
755 out.Write(line[prefix:])
756 return
757 }
758
759 // the slow case: we need to count runes to figure out how
760 // many spaces to insert for each tab
761 column := 0
762 i = 0
763 for i < len(line) {
764 start := i
765 for i < len(line) && line[i] != '\t' {
766 _, size := utf8.DecodeRune(line[i:])
767 i += size
768 column++
769 }
770
771 if i > start {
772 out.Write(line[start:i])
773 }
774
775 if i >= len(line) {
776 break
777 }
778
779 for {
780 out.WriteByte(' ')
781 column++
782 if column%tabSize == 0 {
783 break
784 }
785 }
786
787 i++
788 }
789}
790
// isIndented reports whether a line counts as indented, returning the number
// of bytes the indent occupies (0 = not indented). A single leading tab
// counts as an indent of 1 byte; otherwise indentSize leading spaces are
// required.
func isIndented(data []byte, indentSize int) int {
	switch {
	case len(data) == 0:
		return 0
	case data[0] == '\t':
		return 1
	case len(data) < indentSize:
		return 0
	}
	for _, b := range data[:indentSize] {
		if b != ' ' {
			return 0
		}
	}
	return indentSize
}
810
// slugify creates a url-safe slug for fragments: runs of non-alphanumeric
// bytes collapse into a single '-', and dashes are trimmed from both ends.
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	// ASCII letter-or-digit test, kept local so the slug rules are
	// self-contained
	alnum := func(c byte) bool {
		return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
	}
	slug := make([]byte, 0, len(in))
	prevDash := false
	for _, c := range in {
		switch {
		case alnum(c):
			slug = append(slug, c)
			prevDash = false
		case prevDash:
			// collapse the run into the dash already written
		default:
			slug = append(slug, '-')
			prevDash = true
		}
	}
	// trim dashes from both ends; a one-byte all-dash slug is left as-is,
	// matching the trimming loops' original edge behavior
	var first int
	var c byte
	for first, c = range slug {
		if c != '-' {
			break
		}
	}
	last := len(slug) - 1
	for ; last > 0; last-- {
		if slug[last] != '-' {
			break
		}
	}
	return slug[first : last+1]
}
843}