inline.go
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
// Functions to parse inline elements.
//

package blackfriday

import (
	"bytes"
	"regexp"
	"strconv"
)

var (
	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
)

// Functions to parse text within a block
// Each function returns the number of chars taken care of
// data is the complete block being rendered
// offset is the number of valid chars before the current cursor

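// inline walks data byte by byte, writing runs that have no registered
// inlineCallback straight through as normal text and letting the matching
// callback consume anything it recognizes.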
func (p *parser) inline(out *bytes.Buffer, data []byte) {
	// this is called recursively: enforce a maximum depth
	if p.nesting >= p.maxNesting {
		return
	}
	p.nesting++

	i, end := 0, 0
	for i < len(data) {
		// copy inactive chars into the output
		for end < len(data) && p.inlineCallback[data[end]] == nil {
			end++
		}

		p.r.NormalText(out, data[i:end])

		if end >= len(data) {
			break
		}
		i = end

		// call the trigger
		handler := p.inlineCallback[data[end]]
		if consumed := handler(p, out, data, i); consumed == 0 {
			// no action from the callback; buffer the byte for later
			end = i + 1
		} else {
			// skip past whatever the callback used
			i += consumed
			end = i
		}
	}

	p.nesting--
}

// single and double emphasis parsing
func emphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]
	c := data[0]
	ret := 0

	if len(data) > 2 && data[1] != c {
		// whitespace cannot follow an opening emphasis;
		// strikethrough only takes two characters '~~'
		if c == '~' || isspace(data[1]) {
			return 0
		}
		if ret = helperEmphasis(p, out, data[1:], c); ret == 0 {
			return 0
		}

		return ret + 1
	}

	if len(data) > 3 && data[1] == c && data[2] != c {
		if isspace(data[2]) {
			return 0
		}
		if ret = helperDoubleEmphasis(p, out, data[2:], c); ret == 0 {
			return 0
		}

		return ret + 2
	}

	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
		if c == '~' || isspace(data[3]) {
			return 0
		}
		if ret = helperTripleEmphasis(p, out, data, 3, c); ret == 0 {
			return 0
		}

		return ret + 3
	}

	return 0
}

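// '`': parse a code span; the closing delimiter is the next run of the same
// number of backticks, and leading/trailing spaces inside the span are
// trimmed before rendering.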
func codeSpan(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]

	nb := 0

	// count the number of backticks in the delimiter
	for nb < len(data) && data[nb] == '`' {
		nb++
	}

	// find the next delimiter
	i, end := 0, 0
	for end = nb; end < len(data) && i < nb; end++ {
		if data[end] == '`' {
			i++
		} else {
			i = 0
		}
	}

	// no matching delimiter?
	if i < nb && end >= len(data) {
		return 0
	}

	// trim outside whitespace
	fBegin := nb
	for fBegin < end && data[fBegin] == ' ' {
		fBegin++
	}

	fEnd := end - nb
	for fEnd > fBegin && data[fEnd-1] == ' ' {
		fEnd--
	}

	// render the code span
	if fBegin != fEnd {
		p.r.CodeSpan(out, data[fBegin:fEnd])
	}

	return end
}

// newline preceded by two spaces becomes <br>
// newline without two spaces works when EXTENSION_HARD_LINE_BREAK is enabled
func lineBreak(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// remove trailing spaces from out
	outBytes := out.Bytes()
	end := len(outBytes)
	eol := end
	for eol > 0 && outBytes[eol-1] == ' ' {
		eol--
	}
	out.Truncate(eol)

	precededByTwoSpaces := offset >= 2 && data[offset-2] == ' ' && data[offset-1] == ' '

	// should there be a hard line break here?
	if p.flags&EXTENSION_HARD_LINE_BREAK == 0 && !precededByTwoSpaces {
		return 0
	}

	p.r.LineBreak(out)
	return 1
}

type linkType int

const (
	linkNormal linkType = iota
	linkImg
	linkDeferredFootnote
	linkInlineFootnote
)

// '[': parse a link or an image or a footnote
func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// no links allowed inside regular links, footnote links, and deferred footnotes
	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
		return 0
	}

	// [text] == regular link
	// ![alt] == image
	// ^[text] == inline footnote
	// [^refId] == deferred footnote
	var t linkType
	if offset > 0 && data[offset-1] == '!' {
		t = linkImg
	} else if p.flags&EXTENSION_FOOTNOTES != 0 {
		if offset > 0 && data[offset-1] == '^' {
			t = linkInlineFootnote
		} else if len(data)-1 > offset && data[offset+1] == '^' {
			t = linkDeferredFootnote
		}
	}

	data = data[offset:]

	var (
		i           = 1
		noteId      int
		title, link []byte
		textHasNl   = false
	)

	if t == linkDeferredFootnote {
		i++
	}

	// look for the matching closing bracket
	for level := 1; level > 0 && i < len(data); i++ {
		switch {
		case data[i] == '\n':
			textHasNl = true

		case data[i-1] == '\\':
			continue

		case data[i] == '[':
			level++

		case data[i] == ']':
			level--
			if level <= 0 {
				i-- // compensate for extra i++ in for loop
			}
		}
	}

	if i >= len(data) {
		return 0
	}

	txtE := i
	i++

	// skip any amount of whitespace or newline
	// (this is much more lax than original markdown syntax)
	for i < len(data) && isspace(data[i]) {
		i++
	}

	// inline style link
	switch {
	case i < len(data) && data[i] == '(':
		// skip initial whitespace
		i++

		for i < len(data) && isspace(data[i]) {
			i++
		}

		linkB := i

		// look for link end: ' " )
	findlinkend:
		for i < len(data) {
			switch {
			case data[i] == '\\':
				i += 2

			case data[i] == ')' || data[i] == '\'' || data[i] == '"':
				break findlinkend

			default:
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		linkE := i

		// look for title end if present
		titleB, titleE := 0, 0
		if data[i] == '\'' || data[i] == '"' {
			i++
			titleB = i

		findtitleend:
			for i < len(data) {
				switch {
				case data[i] == '\\':
					i += 2

				case data[i] == ')':
					break findtitleend

				default:
					i++
				}
			}

			if i >= len(data) {
				return 0
			}

			// skip whitespace after title
			titleE = i - 1
			for titleE > titleB && isspace(data[titleE]) {
				titleE--
			}

			// check for closing quote presence
			if data[titleE] != '\'' && data[titleE] != '"' {
				titleB, titleE = 0, 0
				linkE = i
			}
		}

		// remove whitespace at the end of the link
		for linkE > linkB && isspace(data[linkE-1]) {
			linkE--
		}

		// remove optional angle brackets around the link
		if data[linkB] == '<' {
			linkB++
		}
		if data[linkE-1] == '>' {
			linkE--
		}

		// build escaped link and title
		if linkE > linkB {
			link = data[linkB:linkE]
		}

		if titleE > titleB {
			title = data[titleB:titleE]
		}

		i++

	// reference style link
	case i < len(data)-1 && data[i] == '[' && data[i+1] != '^':
		var id []byte

		// look for the id
		i++
		linkB := i
		for i < len(data) && data[i] != ']' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		linkE := i

		// find the reference
		if linkB == linkE {
			if textHasNl {
				var b bytes.Buffer

				for j := 1; j < txtE; j++ {
					switch {
					case data[j] != '\n':
						b.WriteByte(data[j])
					case data[j-1] != ' ':
						b.WriteByte(' ')
					}
				}

				id = b.Bytes()
			} else {
				id = data[1:txtE]
			}
		} else {
			id = data[linkB:linkE]
		}

		// find the reference with matching id (ids are case-insensitive)
		key := string(bytes.ToLower(id))
		lr, ok := p.refs[key]
		if !ok {
			return 0
		}

		// keep link and title from reference
		link = lr.link
		title = lr.title
		i++

	// shortcut reference style link or reference or inline footnote
	default:
		var id []byte

		// craft the id
		if textHasNl {
			var b bytes.Buffer

			for j := 1; j < txtE; j++ {
				switch {
				case data[j] != '\n':
					b.WriteByte(data[j])
				case data[j-1] != ' ':
					b.WriteByte(' ')
				}
			}

			id = b.Bytes()
		} else {
			if t == linkDeferredFootnote {
				id = data[2:txtE] // get rid of the ^
			} else {
				id = data[1:txtE]
			}
		}

		key := string(bytes.ToLower(id))
		if t == linkInlineFootnote {
			// create a new reference
			noteId = len(p.notes) + 1

			var fragment []byte
			if len(id) > 0 {
				if len(id) < 16 {
					fragment = make([]byte, len(id))
				} else {
					fragment = make([]byte, 16)
				}
				copy(fragment, slugify(id))
			} else {
				fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteId))...)
			}

			ref := &reference{
				noteId:   noteId,
				hasBlock: false,
				link:     fragment,
				title:    id,
			}

			p.notes = append(p.notes, ref)

			link = ref.link
			title = ref.title
		} else {
			// find the reference with matching id
			lr, ok := p.refs[key]
			if !ok {
				return 0
			}

			if t == linkDeferredFootnote {
				lr.noteId = len(p.notes) + 1
				p.notes = append(p.notes, lr)
			}

			// keep link and title from reference
			link = lr.link
			// if inline footnote, title == footnote contents
			title = lr.title
			noteId = lr.noteId
		}

		// rewind the whitespace
		i = txtE + 1
	}

	// build content: img alt is escaped, link content is parsed
	var content bytes.Buffer
	if txtE > 1 {
		if t == linkImg {
			content.Write(data[1:txtE])
		} else {
			// links cannot contain other links, so turn off link parsing temporarily
			insideLink := p.insideLink
			p.insideLink = true
			p.inline(&content, data[1:txtE])
			p.insideLink = insideLink
		}
	}

	var uLink []byte
	if t == linkNormal || t == linkImg {
		if len(link) > 0 {
			var uLinkBuf bytes.Buffer
			unescapeText(&uLinkBuf, link)
			uLink = uLinkBuf.Bytes()
		}

		// links need something to click on and somewhere to go
		if len(uLink) == 0 || (t == linkNormal && content.Len() == 0) {
			return 0
		}
	}

	// call the relevant rendering function
	switch t {
	case linkNormal:
		p.r.Link(out, uLink, title, content.Bytes())

	case linkImg:
		outSize := out.Len()
		outBytes := out.Bytes()
		if outSize > 0 && outBytes[outSize-1] == '!' {
			out.Truncate(outSize - 1)
		}

		p.r.Image(out, uLink, title, content.Bytes())

	case linkInlineFootnote:
		outSize := out.Len()
		outBytes := out.Bytes()
		if outSize > 0 && outBytes[outSize-1] == '^' {
			out.Truncate(outSize - 1)
		}

		p.r.FootnoteRef(out, link, noteId)

	case linkDeferredFootnote:
		p.r.FootnoteRef(out, link, noteId)

	default:
		return 0
	}

	return i
}

// '<' when tags or autolinks are allowed
func leftAngle(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]
	altype := LINK_TYPE_NOT_AUTOLINK
	end := tagLength(data, &altype)

	if end > 2 {
		if altype != LINK_TYPE_NOT_AUTOLINK {
			var uLink bytes.Buffer
			unescapeText(&uLink, data[1:end+1-2])
			if uLink.Len() > 0 {
				p.r.AutoLink(out, uLink.Bytes(), altype)
			}
		} else {
			p.r.RawHtmlTag(out, data[:end])
		}
	}

	return end
}

// '\\' backslash escape
var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")

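// escape handles a backslash escape: if the next byte is one of escapeChars
// it is emitted as plain text, otherwise the backslash is left for the
// normal text path to handle.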
func escape(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]

	if len(data) > 1 {
		if bytes.IndexByte(escapeChars, data[1]) < 0 {
			return 0
		}

		p.r.NormalText(out, data[1:2])
	}

	return 2
}

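// unescapeText copies src into ob, dropping the backslash from each
// backslash escape along the way.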
func unescapeText(ob *bytes.Buffer, src []byte) {
	i := 0
	for i < len(src) {
		org := i
		for i < len(src) && src[i] != '\\' {
			i++
		}

		if i > org {
			ob.Write(src[org:i])
		}

		if i+1 >= len(src) {
			break
		}

		ob.WriteByte(src[i+1])
		i += 2
	}
}

// '&' escaped when it doesn't belong to an entity
// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
func entity(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]

	end := 1

	if end < len(data) && data[end] == '#' {
		end++
	}

	for end < len(data) && isalnum(data[end]) {
		end++
	}

	if end < len(data) && data[end] == ';' {
		end++ // real entity
	} else {
		return 0 // lone '&'
	}

	p.r.Entity(out, data[:end])

	return end
}

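// linkEndsWithEntity reports whether data[:linkEnd] ends with a complete
// HTML entity, so autoLink does not strip a ';' that belongs to one.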
func linkEndsWithEntity(data []byte, linkEnd int) bool {
	entityRanges := htmlEntity.FindAllIndex(data[:linkEnd], -1)
	if entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd {
		return true
	}
	return false
}

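// autoLink is triggered on the ':' of a bare URL such as http://example.com:
// it rewinds over the scheme already written to out, finds the end of the
// link, and trims trailing punctuation that is not part of the URL.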
func autoLink(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// quick check to rule out most false hits on ':'
	if p.insideLink || len(data) < offset+3 || data[offset+1] != '/' || data[offset+2] != '/' {
		return 0
	}

	// Now a more expensive check to see if we're not inside an anchor element
	anchorStart := offset
	offsetFromAnchor := 0
	for anchorStart > 0 && data[anchorStart] != '<' {
		anchorStart--
		offsetFromAnchor++
	}

	anchorStr := anchorRe.Find(data[anchorStart:])
	if anchorStr != nil {
		out.Write(anchorStr[offsetFromAnchor:])
		return len(anchorStr) - offsetFromAnchor
	}

	// scan backward for a word boundary
	rewind := 0
	for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
		rewind++
	}
	if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
		return 0
	}

	origData := data
	data = data[offset-rewind:]

	if !isSafeLink(data) {
		return 0
	}

	linkEnd := 0
	for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
		linkEnd++
	}

	// Skip punctuation at the end of the link
	if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
		linkEnd--
	}

	// But don't skip semicolon if it's a part of escaped entity:
	if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
		linkEnd--
	}

	// See if the link finishes with a punctuation sign that can be closed.
	var copen byte
	switch data[linkEnd-1] {
	case '"':
		copen = '"'
	case '\'':
		copen = '\''
	case ')':
		copen = '('
	case ']':
		copen = '['
	case '}':
		copen = '{'
	default:
		copen = 0
	}

	if copen != 0 {
		bufEnd := offset - rewind + linkEnd - 2

		openDelim := 1

		/* Try to close the final punctuation sign in this same line;
		 * if we managed to close it outside of the URL, that means that it's
		 * not part of the URL. If it closes inside the URL, that means it
		 * is part of the URL.
		 *
		 * Examples:
		 *
		 *	foo http://www.pokemon.com/Pikachu_(Electric) bar
		 *		=> http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *	foo (http://www.pokemon.com/Pikachu_(Electric)) bar
		 *		=> http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *	foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *		=> http://www.pokemon.com/Pikachu_(Electric))
		 *
		 *	(foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *		=> foo http://www.pokemon.com/Pikachu_(Electric)
		 */

		for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
			if origData[bufEnd] == data[linkEnd-1] {
				openDelim++
			}

			if origData[bufEnd] == copen {
				openDelim--
			}

			bufEnd--
		}

		if openDelim == 0 {
			linkEnd--
		}
	}

	// we were triggered on the ':', so we need to rewind the output a bit
	if out.Len() >= rewind {
		out.Truncate(len(out.Bytes()) - rewind)
	}

	var uLink bytes.Buffer
	unescapeText(&uLink, data[:linkEnd])

	if uLink.Len() > 0 {
		p.r.AutoLink(out, uLink.Bytes(), LINK_TYPE_NORMAL)
	}

	return linkEnd - rewind
}

func isEndOfLink(char byte) bool {
	return isspace(char) || char == '<'
}

var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

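// isSafeLink reports whether link starts with one of the whitelisted URI
// schemes or relative path prefixes listed above.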
func isSafeLink(link []byte) bool {
	for _, path := range validPaths {
		if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
			if len(link) == len(path) {
				return true
			} else if isalnum(link[len(path)]) {
				return true
			}
		}
	}

	for _, prefix := range validUris {
		// TODO: handle unicode here
		// case-insensitive prefix test
		if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
			return true
		}
	}

	return false
}

// return the length of the given tag, or 0 if it's not valid
func tagLength(data []byte, autolink *int) int {
	var i, j int

	// a valid tag can't be shorter than 3 chars
	if len(data) < 3 {
		return 0
	}

	// begins with a '<' optionally followed by '/', followed by letter or number
	if data[0] != '<' {
		return 0
	}
	if data[1] == '/' {
		i = 2
	} else {
		i = 1
	}

	if !isalnum(data[i]) {
		return 0
	}

	// scheme test
	*autolink = LINK_TYPE_NOT_AUTOLINK

	// try to find the beginning of a URI
	for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
		i++
	}

	if i > 1 && i < len(data) && data[i] == '@' {
		if j = isMailtoAutoLink(data[i:]); j != 0 {
			*autolink = LINK_TYPE_EMAIL
			return i + j
		}
	}

	if i > 2 && i < len(data) && data[i] == ':' {
		*autolink = LINK_TYPE_NORMAL
		i++
	}

	// complete autolink test: no whitespace or ' or "
	switch {
	case i >= len(data):
		*autolink = LINK_TYPE_NOT_AUTOLINK
	case *autolink != 0:
		j = i

		for i < len(data) {
			if data[i] == '\\' {
				i += 2
			} else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
				break
			} else {
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		if i > j && data[i] == '>' {
			return i + 1
		}

		// one of the forbidden chars has been found
		*autolink = LINK_TYPE_NOT_AUTOLINK
	}

	// look for something looking like a tag end
	for i < len(data) && data[i] != '>' {
		i++
	}
	if i >= len(data) {
		return 0
	}
	return i + 1
}

// look for the address part of a mail autolink and '>'
// this is less strict than the original markdown e-mail address matching
func isMailtoAutoLink(data []byte) int {
	nb := 0

	// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
	for i := 0; i < len(data); i++ {
		if isalnum(data[i]) {
			continue
		}

		switch data[i] {
		case '@':
			nb++

		case '-', '.', '_':
			break

		case '>':
			if nb == 1 {
				return i + 1
			} else {
				return 0
			}
		default:
			return 0
		}
	}

	return 0
}

// look for the next emph char, skipping other constructs
func helperFindEmphChar(data []byte, c byte) int {
	i := 1

	for i < len(data) {
		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		if data[i] == c {
			return i
		}

		// do not count escaped chars
		if i != 0 && data[i-1] == '\\' {
			i++
			continue
		}

		if data[i] == '`' {
			// skip a code span
			tmpI := 0
			i++
			for i < len(data) && data[i] != '`' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			i++
		} else if data[i] == '[' {
			// skip a link
			tmpI := 0
			i++
			for i < len(data) && data[i] != ']' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			i++
			for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			if data[i] != '[' && data[i] != '(' { // not a link
				if tmpI > 0 {
					return tmpI
				} else {
					continue
				}
			}
			cc := data[i]
			i++
			for i < len(data) && data[i] != cc {
				if tmpI == 0 && data[i] == c {
					return i
				}
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			i++
		}
	}
	return 0
}

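// helperEmphasis parses single-delimiter emphasis (e.g. *foo* or _foo_);
// data starts just past the opening delimiter, and the return value is the
// number of bytes consumed, or 0 if no valid closing delimiter is found.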
func helperEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
	i := 0

	// skip one symbol if coming from emph3
	if len(data) > 1 && data[0] == c && data[1] == c {
		i = 1
	}

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length
		if i >= len(data) {
			return 0
		}

		if i+1 < len(data) && data[i+1] == c {
			i++
			continue
		}

		if data[i] == c && !isspace(data[i-1]) {
			if p.flags&EXTENSION_NO_INTRA_EMPHASIS != 0 {
				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
					continue
				}
			}

			var work bytes.Buffer
			p.inline(&work, data[:i])
			p.r.Emphasis(out, work.Bytes())
			return i + 1
		}
	}

	return 0
}

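// helperDoubleEmphasis parses double-delimiter emphasis (**foo**, __foo__)
// and strikethrough (~~foo~~); data starts just past the opening delimiters.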
func helperDoubleEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
	i := 0

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
			var work bytes.Buffer
			p.inline(&work, data[:i])

			if work.Len() > 0 {
				// pick the right renderer
				if c == '~' {
					p.r.StrikeThrough(out, work.Bytes())
				} else {
					p.r.DoubleEmphasis(out, work.Bytes())
				}
			}
			return i + 2
		}
		i++
	}
	return 0
}

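// helperTripleEmphasis parses triple-delimiter emphasis (***foo***): offset
// points just past the opening delimiters, and origData is retained so the
// double- and single-emphasis fallbacks can re-parse from the right spot.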
func helperTripleEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int, c byte) int {
	i := 0
	origData := data
	data = data[offset:]

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		// skip symbols preceded by whitespace
		if data[i] != c || isspace(data[i-1]) {
			continue
		}

		switch {
		case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
			// triple symbol found
			var work bytes.Buffer

			p.inline(&work, data[:i])
			if work.Len() > 0 {
				p.r.TripleEmphasis(out, work.Bytes())
			}
			return i + 3
		case (i+1 < len(data) && data[i+1] == c):
			// double symbol found, hand over to emph1
			length = helperEmphasis(p, out, origData[offset-2:], c)
			if length == 0 {
				return 0
			} else {
				return length - 2
			}
		default:
			// single symbol found, hand over to emph2
			length = helperDoubleEmphasis(p, out, origData[offset-1:], c)
			if length == 0 {
				return 0
			} else {
				return length - 1
			}
		}
	}
	return 0
}