inline.go
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
// Functions to parse inline elements.
//

package blackfriday

import (
	"bytes"
	"regexp"
	"strconv"
)

var (
	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
)

// Functions to parse text within a block.
// Each function returns the number of chars it consumed.
// data is the complete block being rendered;
// offset is the number of valid chars before the current cursor.
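//
// Illustrative example (added comment, not from the original source): for the
// input "a *b* c", the bytes "a " have no registered callback and are emitted
// via NormalText; the '*' triggers the emphasis callback, which consumes the
// three bytes "*b*", and parsing resumes at " c".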

func (p *parser) inline(out *bytes.Buffer, data []byte) {
	// this is called recursively: enforce a maximum depth
	if p.nesting >= p.maxNesting {
		return
	}
	p.nesting++

	i, end := 0, 0
	for i < len(data) {
		// copy inactive chars into the output
		for end < len(data) && p.inlineCallback[data[end]] == nil {
			end++
		}

		p.r.NormalText(out, data[i:end])

		if end >= len(data) {
			break
		}
		i = end

		// call the trigger
		handler := p.inlineCallback[data[end]]
		if consumed := handler(p, out, data, i); consumed == 0 {
			// no action from the callback; buffer the byte for later
			end = i + 1
		} else {
			// skip past whatever the callback used
			i += consumed
			end = i
		}
	}

	p.nesting--
}

// single and double emphasis parsing
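//
// Illustrative examples (added comments, not from the original source):
//   *foo*   -> Emphasis("foo"), 5 chars consumed
//   **foo** -> DoubleEmphasis("foo"), 7 chars consumed
//   ~~foo~~ -> StrikeThrough("foo"), 7 chars consumed
//   ***foo*** is dispatched to helperTripleEmphasis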
func emphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]
	c := data[0]
	ret := 0

	if len(data) > 2 && data[1] != c {
		// whitespace cannot follow an opening emphasis;
		// strikethrough only takes two characters '~~'
		if c == '~' || isspace(data[1]) {
			return 0
		}
		if ret = helperEmphasis(p, out, data[1:], c); ret == 0 {
			return 0
		}

		return ret + 1
	}

	if len(data) > 3 && data[1] == c && data[2] != c {
		if isspace(data[2]) {
			return 0
		}
		if ret = helperDoubleEmphasis(p, out, data[2:], c); ret == 0 {
			return 0
		}

		return ret + 2
	}

	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
		if c == '~' || isspace(data[3]) {
			return 0
		}
		if ret = helperTripleEmphasis(p, out, data, 3, c); ret == 0 {
			return 0
		}

		return ret + 3
	}

	return 0
}

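// '`': parse a code span delimited by a run of backticks. Illustrative
// examples (added comment, not from the original source):
//   `foo`       -> CodeSpan("foo"), 5 chars consumed
//   `` a ` b `` -> CodeSpan("a ` b"); a longer delimiter may wrap a literal
//                  backtick, and surrounding spaces are trimmed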
func codeSpan(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]

	nb := 0

	// count the number of backticks in the delimiter
	for nb < len(data) && data[nb] == '`' {
		nb++
	}

	// find the next delimiter
	i, end := 0, 0
	for end = nb; end < len(data) && i < nb; end++ {
		if data[end] == '`' {
			i++
		} else {
			i = 0
		}
	}

	// no matching delimiter?
	if i < nb && end >= len(data) {
		return 0
	}

	// trim outside whitespace
	fBegin := nb
	for fBegin < end && data[fBegin] == ' ' {
		fBegin++
	}

	fEnd := end - nb
	for fEnd > fBegin && data[fEnd-1] == ' ' {
		fEnd--
	}

	// render the code span
	if fBegin != fEnd {
		p.r.CodeSpan(out, data[fBegin:fEnd])
	}

	return end
}

// newline preceded by two spaces becomes <br>
// newline without two spaces works when EXTENSION_HARD_LINE_BREAK is enabled
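// Illustrative behavior (added comment, not from the original source): for
// "foo  " followed by '\n', the two trailing spaces are trimmed from out and
// LineBreak is emitted; with EXTENSION_BACKSLASH_LINE_BREAK, a trailing
// backslash before the newline is removed and also produces a LineBreak.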
func lineBreak(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// remove trailing spaces from out
	outBytes := out.Bytes()
	end := len(outBytes)
	eol := end
	for eol > 0 && outBytes[eol-1] == ' ' {
		eol--
	}
	out.Truncate(eol)

	precededByTwoSpaces := offset >= 2 && data[offset-2] == ' ' && data[offset-1] == ' '
	precededByBackslash := offset >= 1 && data[offset-1] == '\\' // see http://spec.commonmark.org/0.18/#example-527
	precededByBackslash = precededByBackslash && p.flags&EXTENSION_BACKSLASH_LINE_BREAK != 0

	// should there be a hard line break here?
	if p.flags&EXTENSION_HARD_LINE_BREAK == 0 && !precededByTwoSpaces && !precededByBackslash {
		return 0
	}

	if precededByBackslash && eol > 0 {
		out.Truncate(eol - 1)
	}
	p.r.LineBreak(out)
	return 1
}

type linkType int

const (
	linkNormal linkType = iota
	linkImg
	linkDeferredFootnote
	linkInlineFootnote
)

// '[': parse a link or an image or a footnote
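//
// Illustrative forms handled below (added comment, not from the original
// source):
//   [text](/url/ "title")   inline link
//   ![alt](/img.png)        image (the callback fires on '[', the preceding
//                           '!' is detected via offset)
//   [text][id], [text]      reference / shortcut reference link
//   ^[text], [^id]          inline / deferred footnote (EXTENSION_FOOTNOTES)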
func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// no links allowed inside regular links, footnotes, and deferred footnotes
	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
		return 0
	}

	// [text] == regular link
	// ![alt] == image
	// ^[text] == inline footnote
	// [^refId] == deferred footnote
	var t linkType
	if offset > 0 && data[offset-1] == '!' {
		t = linkImg
	} else if p.flags&EXTENSION_FOOTNOTES != 0 {
		if offset > 0 && data[offset-1] == '^' {
			t = linkInlineFootnote
		} else if len(data)-1 > offset && data[offset+1] == '^' {
			t = linkDeferredFootnote
		}
	}

	data = data[offset:]

	var (
		i                        = 1
		noteId                   int
		title, link, alt_content []byte
		textHasNl                = false
	)

	if t == linkDeferredFootnote {
		i++
	}

	// look for the matching closing bracket
	for level := 1; level > 0 && i < len(data); i++ {
		switch {
		case data[i] == '\n':
			textHasNl = true

		case data[i-1] == '\\':
			continue

		case data[i] == '[':
			level++

		case data[i] == ']':
			level--
			if level <= 0 {
				i-- // compensate for extra i++ in for loop
			}
		}
	}

	if i >= len(data) {
		return 0
	}

	txtE := i
	i++

	// skip any amount of whitespace or newline
	// (this is much more lax than original markdown syntax)
	for i < len(data) && isspace(data[i]) {
		i++
	}

	// inline style link
	switch {
	case i < len(data) && data[i] == '(':
		// skip initial whitespace
		i++

		for i < len(data) && isspace(data[i]) {
			i++
		}

		linkB := i

		// look for link end: ' " )
	findlinkend:
		for i < len(data) {
			switch {
			case data[i] == '\\':
				i += 2

			case data[i] == ')' || data[i] == '\'' || data[i] == '"':
				break findlinkend

			default:
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		linkE := i

		// look for title end if present
		titleB, titleE := 0, 0
		if data[i] == '\'' || data[i] == '"' {
			i++
			titleB = i

		findtitleend:
			for i < len(data) {
				switch {
				case data[i] == '\\':
					i += 2

				case data[i] == ')':
					break findtitleend

				default:
					i++
				}
			}

			if i >= len(data) {
				return 0
			}

			// skip whitespace after title
			titleE = i - 1
			for titleE > titleB && isspace(data[titleE]) {
				titleE--
			}

			// check for closing quote presence
			if data[titleE] != '\'' && data[titleE] != '"' {
				titleB, titleE = 0, 0
				linkE = i
			}
		}

		// remove whitespace at the end of the link
		for linkE > linkB && isspace(data[linkE-1]) {
			linkE--
		}

		// remove optional angle brackets around the link
		if data[linkB] == '<' {
			linkB++
		}
		if data[linkE-1] == '>' {
			linkE--
		}

		// build escaped link and title
		if linkE > linkB {
			link = data[linkB:linkE]
		}

		if titleE > titleB {
			title = data[titleB:titleE]
		}

		i++

	// reference style link
	case i < len(data)-1 && data[i] == '[' && data[i+1] != '^':
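		// e.g. [text][id] where "id" is defined elsewhere as
		//   [id]: /url/ "title"
		// (illustrative example, added comment)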
		var id []byte
		alt_content_considered := false

		// look for the id
		i++
		linkB := i
		for i < len(data) && data[i] != ']' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		linkE := i

		// find the reference
		if linkB == linkE {
			if textHasNl {
				var b bytes.Buffer

				for j := 1; j < txtE; j++ {
					switch {
					case data[j] != '\n':
						b.WriteByte(data[j])
					case data[j-1] != ' ':
						b.WriteByte(' ')
					}
				}

				id = b.Bytes()
			} else {
				id = data[1:txtE]
				alt_content_considered = true
			}
		} else {
			id = data[linkB:linkE]
		}

		// find the reference with matching id
		lr, ok := p.getRef(string(id))
		if !ok {
			return 0
		}

		// keep link and title from reference
		link = lr.link
		title = lr.title
		if alt_content_considered {
			alt_content = lr.text
		}
		i++

	// shortcut reference style link, deferred footnote, or inline footnote
	default:
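		// e.g. a shortcut reference such as [text] with no trailing ( or [,
		// or a footnote reference such as [^1] (illustrative, added comment)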
		var id []byte

		// craft the id
		if textHasNl {
			var b bytes.Buffer

			for j := 1; j < txtE; j++ {
				switch {
				case data[j] != '\n':
					b.WriteByte(data[j])
				case data[j-1] != ' ':
					b.WriteByte(' ')
				}
			}

			id = b.Bytes()
		} else {
			if t == linkDeferredFootnote {
				id = data[2:txtE] // get rid of the ^
			} else {
				id = data[1:txtE]
			}
		}

		if t == linkInlineFootnote {
			// create a new reference
			noteId = len(p.notes) + 1

			var fragment []byte
			if len(id) > 0 {
				if len(id) < 16 {
					fragment = make([]byte, len(id))
				} else {
					fragment = make([]byte, 16)
				}
				copy(fragment, slugify(id))
			} else {
				fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteId))...)
			}

			ref := &reference{
				noteId:   noteId,
				hasBlock: false,
				link:     fragment,
				title:    id,
			}

			p.notes = append(p.notes, ref)

			link = ref.link
			title = ref.title
		} else {
			// find the reference with matching id
			lr, ok := p.getRef(string(id))
			if !ok {
				return 0
			}

			if t == linkDeferredFootnote {
				lr.noteId = len(p.notes) + 1
				p.notes = append(p.notes, lr)
			}

			// keep link and title from reference
			link = lr.link
			// if inline footnote, title == footnote contents
			title = lr.title
			noteId = lr.noteId
		}

		// rewind the whitespace
		i = txtE + 1
	}

	// build content: img alt is escaped, link content is parsed
	var content bytes.Buffer
	if txtE > 1 {
		if t == linkImg {
			content.Write(data[1:txtE])
		} else {
			// links cannot contain other links, so turn off link parsing temporarily
			insideLink := p.insideLink
			p.insideLink = true
			p.inline(&content, data[1:txtE])
			p.insideLink = insideLink
		}
	}

	var uLink []byte
	if t == linkNormal || t == linkImg {
		if len(link) > 0 {
			var uLinkBuf bytes.Buffer
			unescapeText(&uLinkBuf, link)
			uLink = uLinkBuf.Bytes()
		}

		// links need something to click on and somewhere to go
		if len(uLink) == 0 || (t == linkNormal && content.Len() == 0) {
			return 0
		}
	}

	// call the relevant rendering function
	switch t {
	case linkNormal:
		if len(alt_content) > 0 {
			p.r.Link(out, uLink, title, alt_content)
		} else {
			p.r.Link(out, uLink, title, content.Bytes())
		}

	case linkImg:
		outSize := out.Len()
		outBytes := out.Bytes()
		if outSize > 0 && outBytes[outSize-1] == '!' {
			out.Truncate(outSize - 1)
		}

		p.r.Image(out, uLink, title, content.Bytes())

	case linkInlineFootnote:
		outSize := out.Len()
		outBytes := out.Bytes()
		if outSize > 0 && outBytes[outSize-1] == '^' {
			out.Truncate(outSize - 1)
		}

		p.r.FootnoteRef(out, link, noteId)

	case linkDeferredFootnote:
		p.r.FootnoteRef(out, link, noteId)

	default:
		return 0
	}

	return i
}

// '<' when tags or autolinks are allowed
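//
// Illustrative behavior (added comment, not from the original source):
//   <http://example.com/> -> AutoLink("http://example.com/"), brackets dropped
//   <span class="x">      -> emitted verbatim via RawHtmlTag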
func leftAngle(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]
	altype := LINK_TYPE_NOT_AUTOLINK
	end := tagLength(data, &altype)

	if end > 2 {
		if altype != LINK_TYPE_NOT_AUTOLINK {
			var uLink bytes.Buffer
			unescapeText(&uLink, data[1:end+1-2])
			if uLink.Len() > 0 {
				p.r.AutoLink(out, uLink.Bytes(), altype)
			}
		} else {
			p.r.RawHtmlTag(out, data[:end])
		}
	}

	return end
}

// '\\' backslash escape
var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")

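// Only the characters in escapeChars may be backslash-escaped. Illustrative
// behavior (added comment, not from the original source): for "\*" the
// literal '*' is emitted via NormalText and 2 chars are consumed; "\q" is
// left untouched because 'q' is not in escapeChars (returns 0).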
func escape(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]

	if len(data) > 1 {
		if bytes.IndexByte(escapeChars, data[1]) < 0 {
			return 0
		}

		p.r.NormalText(out, data[1:2])
	}

	return 2
}

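// unescapeText writes src to ob with backslash escapes collapsed, e.g.
// "foo\*bar" becomes "foo*bar" (descriptive comment added for clarity).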
func unescapeText(ob *bytes.Buffer, src []byte) {
	i := 0
	for i < len(src) {
		org := i
		for i < len(src) && src[i] != '\\' {
			i++
		}

		if i > org {
			ob.Write(src[org:i])
		}

		if i+1 >= len(src) {
			break
		}

		ob.WriteByte(src[i+1])
		i += 2
	}
}

// '&' escaped when it doesn't belong to an entity
// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
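//
// Illustrative examples (added comments, not from the original source):
//   "&amp; x" -> Entity("&amp;"), 5 chars consumed
//   "&#169;"  -> Entity("&#169;"), 6 chars consumed
//   "& x"     -> 0; the lone '&' falls through to normal text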
func entity(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]

	end := 1

	if end < len(data) && data[end] == '#' {
		end++
	}

	for end < len(data) && isalnum(data[end]) {
		end++
	}

	if end < len(data) && data[end] == ';' {
		end++ // real entity
	} else {
		return 0 // lone '&'
	}

	p.r.Entity(out, data[:end])

	return end
}

func linkEndsWithEntity(data []byte, linkEnd int) bool {
	entityRanges := htmlEntity.FindAllIndex(data[:linkEnd], -1)
	if entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd {
		return true
	}
	return false
}

func autoLink(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// quick check to rule out most false hits on ':'
	if p.insideLink || len(data) < offset+3 || data[offset+1] != '/' || data[offset+2] != '/' {
		return 0
	}

	// Now a more expensive check to see if we're not inside an anchor element
	anchorStart := offset
	offsetFromAnchor := 0
	for anchorStart > 0 && data[anchorStart] != '<' {
		anchorStart--
		offsetFromAnchor++
	}

	anchorStr := anchorRe.Find(data[anchorStart:])
	if anchorStr != nil {
		out.Write(anchorStr[offsetFromAnchor:])
		return len(anchorStr) - offsetFromAnchor
	}

	// scan backward for a word boundary
	rewind := 0
	for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
		rewind++
	}
	if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
		return 0
	}

	origData := data
	data = data[offset-rewind:]

	if !isSafeLink(data) {
		return 0
	}

	linkEnd := 0
	for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
		linkEnd++
	}

	// Skip punctuation at the end of the link
	if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
		linkEnd--
	}

	// But don't skip semicolon if it's a part of escaped entity:
	if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
		linkEnd--
	}

	// See if the link finishes with a punctuation sign that can be closed.
	var copen byte
	switch data[linkEnd-1] {
	case '"':
		copen = '"'
	case '\'':
		copen = '\''
	case ')':
		copen = '('
	case ']':
		copen = '['
	case '}':
		copen = '{'
	default:
		copen = 0
	}

	if copen != 0 {
		bufEnd := offset - rewind + linkEnd - 2

		openDelim := 1

		/* Try to close the final punctuation sign in this same line;
		 * if we managed to close it outside of the URL, that means that it's
		 * not part of the URL. If it closes inside the URL, that means it
		 * is part of the URL.
		 *
		 * Examples:
		 *
		 *   foo http://www.pokemon.com/Pikachu_(Electric) bar
		 *     => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *   foo (http://www.pokemon.com/Pikachu_(Electric)) bar
		 *     => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *   foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *     => http://www.pokemon.com/Pikachu_(Electric))
		 *
		 *   (foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *     => foo http://www.pokemon.com/Pikachu_(Electric)
		 */

		for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
			if origData[bufEnd] == data[linkEnd-1] {
				openDelim++
			}

			if origData[bufEnd] == copen {
				openDelim--
			}

			bufEnd--
		}

		if openDelim == 0 {
			linkEnd--
		}
	}

	// we were triggered on the ':', so we need to rewind the output a bit
	if out.Len() >= rewind {
		out.Truncate(len(out.Bytes()) - rewind)
	}

	var uLink bytes.Buffer
	unescapeText(&uLink, data[:linkEnd])

	if uLink.Len() > 0 {
		p.r.AutoLink(out, uLink.Bytes(), LINK_TYPE_NORMAL)
	}

	return linkEnd - rewind
}

func isEndOfLink(char byte) bool {
	return isspace(char) || char == '<'
}

var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}

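// isSafeLink reports whether link starts with one of the allowed URI schemes
// or path prefixes above. Illustrative behavior (added comment):
// "https://example.com" and "./docs" are accepted, while
// "javascript:alert(1)" is rejected because its scheme is not listed.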
func isSafeLink(link []byte) bool {
	for _, path := range validPaths {
		if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
			if len(link) == len(path) {
				return true
			} else if isalnum(link[len(path)]) {
				return true
			}
		}
	}

	for _, prefix := range validUris {
		// TODO: handle unicode here
		// case-insensitive prefix test
		if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
			return true
		}
	}

	return false
}

// return the length of the given tag, or 0 if it's not valid
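//
// Illustrative examples (added comments, not from the original source):
//   "<em>x"                 -> 4, autolink left as LINK_TYPE_NOT_AUTOLINK
//   "<http://example.com/>" -> 21 (the whole tag), autolink = LINK_TYPE_NORMAL
//   "<em"                   -> 0, no closing '>'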
func tagLength(data []byte, autolink *int) int {
	var i, j int

	// a valid tag can't be shorter than 3 chars
	if len(data) < 3 {
		return 0
	}

	// begins with a '<' optionally followed by '/', followed by letter or number
	if data[0] != '<' {
		return 0
	}
	if data[1] == '/' {
		i = 2
	} else {
		i = 1
	}

	if !isalnum(data[i]) {
		return 0
	}

	// scheme test
	*autolink = LINK_TYPE_NOT_AUTOLINK

	// try to find the beginning of a URI
	for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
		i++
	}

	if i > 1 && i < len(data) && data[i] == '@' {
		if j = isMailtoAutoLink(data[i:]); j != 0 {
			*autolink = LINK_TYPE_EMAIL
			return i + j
		}
	}

	if i > 2 && i < len(data) && data[i] == ':' {
		*autolink = LINK_TYPE_NORMAL
		i++
	}

	// complete autolink test: no whitespace or ' or "
	switch {
	case i >= len(data):
		*autolink = LINK_TYPE_NOT_AUTOLINK
	case *autolink != 0:
		j = i

		for i < len(data) {
			if data[i] == '\\' {
				i += 2
			} else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
				break
			} else {
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		if i > j && data[i] == '>' {
			return i + 1
		}

		// one of the forbidden chars has been found
		*autolink = LINK_TYPE_NOT_AUTOLINK
	}

	// look for something looking like a tag end
	for i < len(data) && data[i] != '>' {
		i++
	}
	if i >= len(data) {
		return 0
	}
	return i + 1
}

// look for the address part of a mail autolink and '>'
// this is less strict than the original markdown e-mail address matching
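//
// Illustrative example (added comment, not from the original source): for
// "user@example.com> ..." it returns 17, the index just past the '>';
// it returns 0 when there is no '@', more than one '@', or no closing '>'.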
func isMailtoAutoLink(data []byte) int {
	nb := 0

	// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
	for i := 0; i < len(data); i++ {
		if isalnum(data[i]) {
			continue
		}

		switch data[i] {
		case '@':
			nb++

		case '-', '.', '_':
			break

		case '>':
			if nb == 1 {
				return i + 1
			} else {
				return 0
			}
		default:
			return 0
		}
	}

	return 0
}

// look for the next emph char, skipping other constructs
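//
// Illustrative example (added comment, not from the original source): looking
// for '*' in "a `*` b*" skips the '*' inside the code span and returns 7,
// the index of the trailing '*'.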
func helperFindEmphChar(data []byte, c byte) int {
	i := 1

	for i < len(data) {
		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		if data[i] == c {
			return i
		}

		// do not count escaped chars
		if i != 0 && data[i-1] == '\\' {
			i++
			continue
		}

		if data[i] == '`' {
			// skip a code span
			tmpI := 0
			i++
			for i < len(data) && data[i] != '`' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			i++
		} else if data[i] == '[' {
			// skip a link
			tmpI := 0
			i++
			for i < len(data) && data[i] != ']' {
				if tmpI == 0 && data[i] == c {
					tmpI = i
				}
				i++
			}
			i++
			for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			if data[i] != '[' && data[i] != '(' { // not a link
				if tmpI > 0 {
					return tmpI
				} else {
					continue
				}
			}
			cc := data[i]
			i++
			for i < len(data) && data[i] != cc {
				if tmpI == 0 && data[i] == c {
					return i
				}
				i++
			}
			if i >= len(data) {
				return tmpI
			}
			i++
		}
	}
	return 0
}

func helperEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
	i := 0

	// skip one symbol if coming from emph3
	if len(data) > 1 && data[0] == c && data[1] == c {
		i = 1
	}

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length
		if i >= len(data) {
			return 0
		}

		if i+1 < len(data) && data[i+1] == c {
			i++
			continue
		}

		if data[i] == c && !isspace(data[i-1]) {
			if p.flags&EXTENSION_NO_INTRA_EMPHASIS != 0 {
				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
					continue
				}
			}

			var work bytes.Buffer
			p.inline(&work, data[:i])
			p.r.Emphasis(out, work.Bytes())
			return i + 1
		}
	}

	return 0
}

func helperDoubleEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
	i := 0

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
			var work bytes.Buffer
			p.inline(&work, data[:i])

			if work.Len() > 0 {
				// pick the right renderer
				if c == '~' {
					p.r.StrikeThrough(out, work.Bytes())
				} else {
					p.r.DoubleEmphasis(out, work.Bytes())
				}
			}
			return i + 2
		}
		i++
	}
	return 0
}

func helperTripleEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int, c byte) int {
	i := 0
	origData := data
	data = data[offset:]

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		// skip whitespace-preceded symbols
		if data[i] != c || isspace(data[i-1]) {
			continue
		}

		switch {
		case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
			// triple symbol found
			var work bytes.Buffer

			p.inline(&work, data[:i])
			if work.Len() > 0 {
				p.r.TripleEmphasis(out, work.Bytes())
			}
			return i + 3
		case (i+1 < len(data) && data[i+1] == c):
			// double symbol found, hand over to emph1
			length = helperEmphasis(p, out, origData[offset-2:], c)
			if length == 0 {
				return 0
			} else {
				return length - 2
			}
		default:
			// single symbol found, hand over to emph2
			length = helperDoubleEmphasis(p, out, origData[offset-1:], c)
			if length == 0 {
				return 0
			} else {
				return length - 1
			}
		}
	}
	return 0
}