all repos — grayfriday @ 40f28ee0224972fc3ac84999e877778a81f1e87d

blackfriday fork with a few changes

inline.go (view raw)

   1//
   2// Blackfriday Markdown Processor
   3// Available at http://github.com/russross/blackfriday
   4//
   5// Copyright © 2011 Russ Ross <russ@russross.com>.
   6// Distributed under the Simplified BSD License.
   7// See README.md for details.
   8//
   9
  10//
  11// Functions to parse inline elements.
  12//
  13
  14package blackfriday
  15
  16import (
  17	"bytes"
  18	"regexp"
  19	"strconv"
  20)
  21
var (
	// urlRe matches the URL portion of a link: either scheme-prefixed
	// (http/https/ftp) or site-relative (leading '/').
	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
	// anchorRe matches a complete, already-rendered HTML anchor
	// (<a href="URL" [title="..."]>URL</a>); autoLink uses it to avoid
	// re-linking URLs that are already inside an anchor element.
	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
)
  26
  27// Functions to parse text within a block
  28// Each function returns the number of chars taken care of
  29// data is the complete block being rendered
  30// offset is the number of valid chars before the current cursor
  31
// inline parses the inline elements of data and renders them into out.
// It scans byte by byte: runs of bytes with no registered trigger are
// emitted via NormalText, and each trigger byte is handed to its callback,
// which reports how many bytes it consumed (0 means "no match here").
func (p *parser) inline(out *bytes.Buffer, data []byte) {
	// this is called recursively: enforce a maximum depth
	if p.nesting >= p.maxNesting {
		return
	}
	p.nesting++

	// i marks the start of un-emitted text; end scans ahead of it
	i, end := 0, 0
	for i < len(data) {
		// copy inactive chars into the output
		for end < len(data) && p.inlineCallback[data[end]] == nil {
			end++
		}

		p.r.NormalText(out, data[i:end])

		if end >= len(data) {
			break
		}
		i = end

		// call the trigger
		handler := p.inlineCallback[data[end]]
		if consumed := handler(p, out, data, i); consumed == 0 {
			// no action from the callback; buffer the byte for later
			end = i + 1
		} else {
			// skip past whatever the callback used
			i += consumed
			end = i
		}
	}

	p.nesting--
}
  67
// single and double emphasis parsing
//
// emphasis dispatches on the length of the delimiter run starting at
// data[offset] (the trigger char c: '*', '_' or '~') to the single,
// double, or triple emphasis helper. Returns the total number of bytes
// consumed, or 0 when the construct does not parse.
func emphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]
	c := data[0]
	ret := 0

	// single delimiter, e.g. *foo*
	if len(data) > 2 && data[1] != c {
		// whitespace cannot follow an opening emphasis;
		// strikethrough only takes two characters '~~'
		if c == '~' || isspace(data[1]) {
			return 0
		}
		if ret = helperEmphasis(p, out, data[1:], c); ret == 0 {
			return 0
		}

		return ret + 1
	}

	// double delimiter, e.g. **foo** or ~~foo~~
	if len(data) > 3 && data[1] == c && data[2] != c {
		if isspace(data[2]) {
			return 0
		}
		if ret = helperDoubleEmphasis(p, out, data[2:], c); ret == 0 {
			return 0
		}

		return ret + 2
	}

	// triple delimiter, e.g. ***foo***
	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
		if c == '~' || isspace(data[3]) {
			return 0
		}
		if ret = helperTripleEmphasis(p, out, data, 3, c); ret == 0 {
			return 0
		}

		return ret + 3
	}

	return 0
}
 111
// codeSpan handles a '`' trigger: a code span delimited by runs of
// backticks of equal length. Returns the number of bytes consumed, or 0
// when no matching closing run exists.
func codeSpan(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	data = data[offset:]

	nb := 0

	// count the number of backticks in the delimiter
	for nb < len(data) && data[nb] == '`' {
		nb++
	}

	// find the next delimiter: i counts consecutive backticks seen so far
	// and resets on any other byte; the loop stops once i reaches nb, so
	// end lands just past the closing run
	i, end := 0, 0
	for end = nb; end < len(data) && i < nb; end++ {
		if data[end] == '`' {
			i++
		} else {
			i = 0
		}
	}

	// no matching delimiter?
	if i < nb && end >= len(data) {
		return 0
	}

	// trim outside whitespace
	fBegin := nb
	for fBegin < end && data[fBegin] == ' ' {
		fBegin++
	}

	fEnd := end - nb
	for fEnd > fBegin && data[fEnd-1] == ' ' {
		fEnd--
	}

	// render the code span (an all-whitespace span produces no output)
	if fBegin != fEnd {
		p.r.CodeSpan(out, data[fBegin:fEnd])
	}

	return end

}
 156
 157// newline preceded by two spaces becomes <br>
 158// newline without two spaces works when EXTENSION_HARD_LINE_BREAK is enabled
 159func lineBreak(p *parser, out *bytes.Buffer, data []byte, offset int) int {
 160	// remove trailing spaces from out
 161	outBytes := out.Bytes()
 162	end := len(outBytes)
 163	eol := end
 164	for eol > 0 && outBytes[eol-1] == ' ' {
 165		eol--
 166	}
 167	out.Truncate(eol)
 168
 169	precededByTwoSpaces := offset >= 2 && data[offset-2] == ' ' && data[offset-1] == ' '
 170
 171	// should there be a hard line break here?
 172	if p.flags&EXTENSION_HARD_LINE_BREAK == 0 && !precededByTwoSpaces {
 173		return 0
 174	}
 175
 176	p.r.LineBreak(out)
 177	return 1
 178}
 179
// linkType discriminates the bracketed constructs handled by link().
type linkType int

const (
	linkNormal           linkType = iota // [text](url) or [text][ref]
	linkImg                              // ![alt](url)
	linkDeferredFootnote                 // [^refId]
	linkInlineFootnote                   // ^[text]
)
 188
// '[': parse a link or an image or a footnote
//
// link handles the four bracketed constructs listed in the table below,
// renders the matched construct through the appropriate renderer callback,
// and returns the number of bytes consumed from data[offset:], or 0 when
// nothing parses.
func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// no links allowed inside regular links, footnote, and deferred footnotes
	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
		return 0
	}

	// [text] == regular link
	// ![alt] == image
	// ^[text] == inline footnote
	// [^refId] == deferred footnote
	var t linkType
	if offset > 0 && data[offset-1] == '!' {
		t = linkImg
	} else if p.flags&EXTENSION_FOOTNOTES != 0 {
		if offset > 0 && data[offset-1] == '^' {
			t = linkInlineFootnote
		} else if len(data)-1 > offset && data[offset+1] == '^' {
			t = linkDeferredFootnote
		}
	}

	data = data[offset:]

	var (
		i           = 1
		noteId      int
		title, link []byte
		textHasNl   = false
	)

	// deferred footnotes start with "[^", skip the '^' as well
	if t == linkDeferredFootnote {
		i++
	}

	// look for the matching closing bracket, allowing balanced nesting
	for level := 1; level > 0 && i < len(data); i++ {
		switch {
		case data[i] == '\n':
			textHasNl = true

		case data[i-1] == '\\':
			continue

		case data[i] == '[':
			level++

		case data[i] == ']':
			level--
			if level <= 0 {
				i-- // compensate for extra i++ in for loop
			}
		}
	}

	if i >= len(data) {
		return 0
	}

	txtE := i
	i++

	// skip any amount of whitespace or newline
	// (this is much more lax than original markdown syntax)
	for i < len(data) && isspace(data[i]) {
		i++
	}

	// inline style link
	switch {
	case i < len(data) && data[i] == '(':
		// skip initial whitespace
		i++

		for i < len(data) && isspace(data[i]) {
			i++
		}

		linkB := i

		// look for link end: ' " )
	findlinkend:
		for i < len(data) {
			switch {
			case data[i] == '\\':
				i += 2

			case data[i] == ')' || data[i] == '\'' || data[i] == '"':
				break findlinkend

			default:
				i++
			}
		}

		if i >= len(data) {
			return 0
		}
		linkE := i

		// look for title end if present
		titleB, titleE := 0, 0
		if data[i] == '\'' || data[i] == '"' {
			i++
			titleB = i

		findtitleend:
			for i < len(data) {
				switch {
				case data[i] == '\\':
					i += 2

				case data[i] == ')':
					break findtitleend

				default:
					i++
				}
			}

			if i >= len(data) {
				return 0
			}

			// skip whitespace after title
			titleE = i - 1
			for titleE > titleB && isspace(data[titleE]) {
				titleE--
			}

			// check for closing quote presence; without one the quoted
			// span was part of the URL after all
			if data[titleE] != '\'' && data[titleE] != '"' {
				titleB, titleE = 0, 0
				linkE = i
			}
		}

		// remove whitespace at the end of the link
		for linkE > linkB && isspace(data[linkE-1]) {
			linkE--
		}

		// remove optional angle brackets around the link
		if data[linkB] == '<' {
			linkB++
		}
		if data[linkE-1] == '>' {
			linkE--
		}

		// build escaped link and title
		if linkE > linkB {
			link = data[linkB:linkE]
		}

		if titleE > titleB {
			title = data[titleB:titleE]
		}

		i++

	// reference style link
	case i < len(data) && data[i] == '[':
		var id []byte

		// look for the id
		i++
		linkB := i
		for i < len(data) && data[i] != ']' {
			i++
		}
		if i >= len(data) {
			return 0
		}
		linkE := i

		// find the reference
		if linkB == linkE {
			// empty id: [text][] uses the link text itself as the id,
			// with newlines collapsed to single spaces
			if textHasNl {
				var b bytes.Buffer

				for j := 1; j < txtE; j++ {
					switch {
					case data[j] != '\n':
						b.WriteByte(data[j])
					case data[j-1] != ' ':
						b.WriteByte(' ')
					}
				}

				id = b.Bytes()
			} else {
				id = data[1:txtE]
			}
		} else {
			id = data[linkB:linkE]
		}

		// find the reference with matching id (ids are case-insensitive)
		key := string(bytes.ToLower(id))
		lr, ok := p.refs[key]
		if !ok {
			return 0

		}

		// keep link and title from reference
		link = lr.link
		title = lr.title
		i++

	// shortcut reference style link or reference or inline footnote
	default:
		var id []byte

		// craft the id from the bracketed text, collapsing newlines
		if textHasNl {
			var b bytes.Buffer

			for j := 1; j < txtE; j++ {
				switch {
				case data[j] != '\n':
					b.WriteByte(data[j])
				case data[j-1] != ' ':
					b.WriteByte(' ')
				}
			}

			id = b.Bytes()
		} else {
			if t == linkDeferredFootnote {
				id = data[2:txtE] // get rid of the ^
			} else {
				id = data[1:txtE]
			}
		}

		key := string(bytes.ToLower(id))
		if t == linkInlineFootnote {
			// create a new reference
			noteId = len(p.notes) + 1

			// the link fragment is a slug of the footnote text capped at
			// 16 bytes, or "footnote-N" when the text is empty
			var fragment []byte
			if len(id) > 0 {
				if len(id) < 16 {
					fragment = make([]byte, len(id))
				} else {
					fragment = make([]byte, 16)
				}
				copy(fragment, slugify(id))
			} else {
				fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteId))...)
			}

			ref := &reference{
				noteId:   noteId,
				hasBlock: false,
				link:     fragment,
				title:    id,
			}

			p.notes = append(p.notes, ref)

			link = ref.link
			title = ref.title
		} else {
			// find the reference with matching id
			lr, ok := p.refs[key]
			if !ok {
				return 0
			}

			if t == linkDeferredFootnote {
				lr.noteId = len(p.notes) + 1
				p.notes = append(p.notes, lr)
			}

			// keep link and title from reference
			link = lr.link
			// if inline footnote, title == footnote contents
			title = lr.title
			noteId = lr.noteId
		}

		// rewind the whitespace
		i = txtE + 1
	}

	// build content: img alt is escaped, link content is parsed
	var content bytes.Buffer
	if txtE > 1 {
		if t == linkImg {
			content.Write(data[1:txtE])
		} else {
			// links cannot contain other links, so turn off link parsing temporarily
			insideLink := p.insideLink
			p.insideLink = true
			p.inline(&content, data[1:txtE])
			p.insideLink = insideLink
		}
	}

	var uLink []byte
	if t == linkNormal || t == linkImg {
		if len(link) > 0 {
			var uLinkBuf bytes.Buffer
			unescapeText(&uLinkBuf, link)
			uLink = uLinkBuf.Bytes()
		}

		// links need something to click on and somewhere to go
		if len(uLink) == 0 || (t == linkNormal && content.Len() == 0) {
			return 0
		}
	}

	// call the relevant rendering function
	switch t {
	case linkNormal:
		p.r.Link(out, uLink, title, content.Bytes())

	case linkImg:
		// drop the '!' that triggered us from the already-rendered output
		outSize := out.Len()
		outBytes := out.Bytes()
		if outSize > 0 && outBytes[outSize-1] == '!' {
			out.Truncate(outSize - 1)
		}

		p.r.Image(out, uLink, title, content.Bytes())

	case linkInlineFootnote:
		// drop the '^' that triggered us from the already-rendered output
		outSize := out.Len()
		outBytes := out.Bytes()
		if outSize > 0 && outBytes[outSize-1] == '^' {
			out.Truncate(outSize - 1)
		}

		p.r.FootnoteRef(out, link, noteId)

	case linkDeferredFootnote:
		p.r.FootnoteRef(out, link, noteId)

	default:
		return 0
	}

	return i
}
 537
 538// '<' when tags or autolinks are allowed
 539func leftAngle(p *parser, out *bytes.Buffer, data []byte, offset int) int {
 540	data = data[offset:]
 541	altype := LINK_TYPE_NOT_AUTOLINK
 542	end := tagLength(data, &altype)
 543
 544	if end > 2 {
 545		if altype != LINK_TYPE_NOT_AUTOLINK {
 546			var uLink bytes.Buffer
 547			unescapeText(&uLink, data[1:end+1-2])
 548			if uLink.Len() > 0 {
 549				p.r.AutoLink(out, uLink.Bytes(), altype)
 550			}
 551		} else {
 552			p.r.RawHtmlTag(out, data[:end])
 553		}
 554	}
 555
 556	return end
 557}
 558
 559// '\\' backslash escape
 560var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
 561
 562func escape(p *parser, out *bytes.Buffer, data []byte, offset int) int {
 563	data = data[offset:]
 564
 565	if len(data) > 1 {
 566		if bytes.IndexByte(escapeChars, data[1]) < 0 {
 567			return 0
 568		}
 569
 570		p.r.NormalText(out, data[1:2])
 571	}
 572
 573	return 2
 574}
 575
 576func unescapeText(ob *bytes.Buffer, src []byte) {
 577	i := 0
 578	for i < len(src) {
 579		org := i
 580		for i < len(src) && src[i] != '\\' {
 581			i++
 582		}
 583
 584		if i > org {
 585			ob.Write(src[org:i])
 586		}
 587
 588		if i+1 >= len(src) {
 589			break
 590		}
 591
 592		ob.WriteByte(src[i+1])
 593		i += 2
 594	}
 595}
 596
 597// '&' escaped when it doesn't belong to an entity
 598// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
 599func entity(p *parser, out *bytes.Buffer, data []byte, offset int) int {
 600	data = data[offset:]
 601
 602	end := 1
 603
 604	if end < len(data) && data[end] == '#' {
 605		end++
 606	}
 607
 608	for end < len(data) && isalnum(data[end]) {
 609		end++
 610	}
 611
 612	if end < len(data) && data[end] == ';' {
 613		end++ // real entity
 614	} else {
 615		return 0 // lone '&'
 616	}
 617
 618	p.r.Entity(out, data[:end])
 619
 620	return end
 621}
 622
 623func linkEndsWithEntity(data []byte, linkEnd int) bool {
 624	entityRanges := htmlEntity.FindAllIndex(data[:linkEnd], -1)
 625	if entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd {
 626		return true
 627	}
 628	return false
 629}
 630
// autoLink handles a ':' trigger: it recognizes bare URLs such as
// http://example.com in plain text and renders them as links. Returns the
// number of bytes consumed past the trigger position, or 0 on no match.
func autoLink(p *parser, out *bytes.Buffer, data []byte, offset int) int {
	// quick check to rule out most false hits on ':'
	if p.insideLink || len(data) < offset+3 || data[offset+1] != '/' || data[offset+2] != '/' {
		return 0
	}

	// Now a more expensive check to see if we're not inside an anchor element
	anchorStart := offset
	offsetFromAnchor := 0
	for anchorStart > 0 && data[anchorStart] != '<' {
		anchorStart--
		offsetFromAnchor++
	}

	// already inside a rendered <a href="...">...</a>: pass the rest of
	// the anchor through verbatim instead of re-linking it
	anchorStr := anchorRe.Find(data[anchorStart:])
	if anchorStr != nil {
		out.Write(anchorStr[offsetFromAnchor:])
		return len(anchorStr) - offsetFromAnchor
	}

	// scan backward for a word boundary, picking up the scheme letters
	// that precede the ':' trigger
	rewind := 0
	for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
		rewind++
	}
	if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
		return 0
	}

	origData := data
	data = data[offset-rewind:]

	if !isSafeLink(data) {
		return 0
	}

	linkEnd := 0
	for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
		linkEnd++
	}

	// Skip punctuation at the end of the link
	if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
		linkEnd--
	}

	// But don't skip semicolon if it's a part of escaped entity:
	if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
		linkEnd--
	}

	// See if the link finishes with a punctuation sign that can be closed.
	var copen byte
	switch data[linkEnd-1] {
	case '"':
		copen = '"'
	case '\'':
		copen = '\''
	case ')':
		copen = '('
	case ']':
		copen = '['
	case '}':
		copen = '{'
	default:
		copen = 0
	}

	if copen != 0 {
		bufEnd := offset - rewind + linkEnd - 2

		openDelim := 1

		/* Try to close the final punctuation sign in this same line;
		 * if we managed to close it outside of the URL, that means that it's
		 * not part of the URL. If it closes inside the URL, that means it
		 * is part of the URL.
		 *
		 * Examples:
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo (http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric)
		 *
		 *      foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => http://www.pokemon.com/Pikachu_(Electric))
		 *
		 *      (foo http://www.pokemon.com/Pikachu_(Electric)) bar
		 *              => foo http://www.pokemon.com/Pikachu_(Electric)
		 */

		// walk backward through the rendered line, counting closers vs.
		// openers of the same kind
		for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
			if origData[bufEnd] == data[linkEnd-1] {
				openDelim++
			}

			if origData[bufEnd] == copen {
				openDelim--
			}

			bufEnd--
		}

		if openDelim == 0 {
			linkEnd--
		}
	}

	// we were triggered on the ':', so we need to rewind the output a bit
	// (the scheme letters were already emitted as normal text)
	if out.Len() >= rewind {
		out.Truncate(len(out.Bytes()) - rewind)
	}

	var uLink bytes.Buffer
	unescapeText(&uLink, data[:linkEnd])

	if uLink.Len() > 0 {
		p.r.AutoLink(out, uLink.Bytes(), LINK_TYPE_NORMAL)
	}

	return linkEnd - rewind
}
 755
 756func isEndOfLink(char byte) bool {
 757	return isspace(char) || char == '<'
 758}
 759
 760var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://"), []byte("/")}
 761
 762func isSafeLink(link []byte) bool {
 763	for _, prefix := range validUris {
 764		// TODO: handle unicode here
 765		// case-insensitive prefix test
 766		if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
 767			return true
 768		}
 769	}
 770
 771	return false
 772}
 773
// tagLength returns the length of the tag starting at data, or 0 if it's
// not valid; *autolink is set to the detected kind (LINK_TYPE_EMAIL,
// LINK_TYPE_NORMAL for <scheme:...>, or LINK_TYPE_NOT_AUTOLINK for a
// plain HTML tag).
func tagLength(data []byte, autolink *int) int {
	var i, j int

	// a valid tag can't be shorter than 3 chars
	if len(data) < 3 {
		return 0
	}

	// begins with a '<' optionally followed by '/', followed by letter or number
	if data[0] != '<' {
		return 0
	}
	if data[1] == '/' {
		i = 2
	} else {
		i = 1
	}

	if !isalnum(data[i]) {
		return 0
	}

	// scheme test
	*autolink = LINK_TYPE_NOT_AUTOLINK

	// try to find the beginning of an URI
	for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
		i++
	}

	// '@' right after the leading word: candidate email autolink
	if i > 1 && i < len(data) && data[i] == '@' {
		if j = isMailtoAutoLink(data[i:]); j != 0 {
			*autolink = LINK_TYPE_EMAIL
			return i + j
		}
	}

	// ':' after a scheme of at least two chars: candidate URL autolink
	if i > 2 && i < len(data) && data[i] == ':' {
		*autolink = LINK_TYPE_NORMAL
		i++
	}

	// complete autolink test: no whitespace or ' or "
	switch {
	case i >= len(data):
		*autolink = LINK_TYPE_NOT_AUTOLINK
	case *autolink != 0:
		j = i

		for i < len(data) {
			if data[i] == '\\' {
				i += 2
			} else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
				break
			} else {
				i++
			}

		}

		if i >= len(data) {
			return 0
		}
		if i > j && data[i] == '>' {
			return i + 1
		}

		// one of the forbidden chars has been found
		*autolink = LINK_TYPE_NOT_AUTOLINK
	}

	// look for something looking like a tag end
	for i < len(data) && data[i] != '>' {
		i++
	}
	if i >= len(data) {
		return 0
	}
	return i + 1
}
 855
 856// look for the address part of a mail autolink and '>'
 857// this is less strict than the original markdown e-mail address matching
 858func isMailtoAutoLink(data []byte) int {
 859	nb := 0
 860
 861	// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
 862	for i := 0; i < len(data); i++ {
 863		if isalnum(data[i]) {
 864			continue
 865		}
 866
 867		switch data[i] {
 868		case '@':
 869			nb++
 870
 871		case '-', '.', '_':
 872			break
 873
 874		case '>':
 875			if nb == 1 {
 876				return i + 1
 877			} else {
 878				return 0
 879			}
 880		default:
 881			return 0
 882		}
 883	}
 884
 885	return 0
 886}
 887
 888// look for the next emph char, skipping other constructs
 889func helperFindEmphChar(data []byte, c byte) int {
 890	i := 1
 891
 892	for i < len(data) {
 893		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
 894			i++
 895		}
 896		if i >= len(data) {
 897			return 0
 898		}
 899		if data[i] == c {
 900			return i
 901		}
 902
 903		// do not count escaped chars
 904		if i != 0 && data[i-1] == '\\' {
 905			i++
 906			continue
 907		}
 908
 909		if data[i] == '`' {
 910			// skip a code span
 911			tmpI := 0
 912			i++
 913			for i < len(data) && data[i] != '`' {
 914				if tmpI == 0 && data[i] == c {
 915					tmpI = i
 916				}
 917				i++
 918			}
 919			if i >= len(data) {
 920				return tmpI
 921			}
 922			i++
 923		} else if data[i] == '[' {
 924			// skip a link
 925			tmpI := 0
 926			i++
 927			for i < len(data) && data[i] != ']' {
 928				if tmpI == 0 && data[i] == c {
 929					tmpI = i
 930				}
 931				i++
 932			}
 933			i++
 934			for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
 935				i++
 936			}
 937			if i >= len(data) {
 938				return tmpI
 939			}
 940			if data[i] != '[' && data[i] != '(' { // not a link
 941				if tmpI > 0 {
 942					return tmpI
 943				} else {
 944					continue
 945				}
 946			}
 947			cc := data[i]
 948			i++
 949			for i < len(data) && data[i] != cc {
 950				if tmpI == 0 && data[i] == c {
 951					tmpI = i
 952				}
 953				i++
 954			}
 955			if i >= len(data) {
 956				return tmpI
 957			}
 958			i++
 959		}
 960	}
 961	return 0
 962}
 963
// helperEmphasis parses the body of single-delimiter emphasis: data starts
// just after the opening c, and the return value counts through the
// closing delimiter (the caller adds the opener's length).
func helperEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
	i := 0

	// skip one symbol if coming from emph3
	if len(data) > 1 && data[0] == c && data[1] == c {
		i = 1
	}

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length
		if i >= len(data) {
			return 0
		}

		// a doubled delimiter is not a closer; step over it
		if i+1 < len(data) && data[i+1] == c {
			i++
			continue
		}

		// closer: the delimiter char not preceded by whitespace
		if data[i] == c && !isspace(data[i-1]) {

			// intra-word emphasis disabled: the closer must also sit at
			// a word boundary (end of input, space, or punctuation)
			if p.flags&EXTENSION_NO_INTRA_EMPHASIS != 0 {
				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
					continue
				}
			}

			var work bytes.Buffer
			p.inline(&work, data[:i])
			p.r.Emphasis(out, work.Bytes())
			return i + 1
		}
	}

	return 0
}
1004
// helperDoubleEmphasis parses the body of double-delimiter emphasis
// (**strong** or ~~strikethrough~~): data starts just after the two
// opening chars, and the return value counts through both closing chars.
func helperDoubleEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
	i := 0

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		// closer: two consecutive c's not preceded by whitespace
		if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
			var work bytes.Buffer
			p.inline(&work, data[:i])

			if work.Len() > 0 {
				// pick the right renderer
				if c == '~' {
					p.r.StrikeThrough(out, work.Bytes())
				} else {
					p.r.DoubleEmphasis(out, work.Bytes())
				}
			}
			return i + 2
		}
		i++
	}
	return 0
}
1033
// helperTripleEmphasis parses the body of triple-delimiter emphasis.
// data[offset:] starts just after the three opening delimiters; origData
// is retained so that, when only one or two closing chars are found, the
// span can be re-parsed as single/double emphasis including the unused
// opening delimiters.
func helperTripleEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int, c byte) int {
	i := 0
	origData := data
	data = data[offset:]

	for i < len(data) {
		length := helperFindEmphChar(data[i:], c)
		if length == 0 {
			return 0
		}
		i += length

		// skip whitespace-preceded symbols
		if data[i] != c || isspace(data[i-1]) {
			continue
		}

		switch {
		case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
			// triple symbol found
			var work bytes.Buffer

			p.inline(&work, data[:i])
			if work.Len() > 0 {
				p.r.TripleEmphasis(out, work.Bytes())
			}
			return i + 3
		case (i+1 < len(data) && data[i+1] == c):
			// double symbol found, hand over to emph1
			length = helperEmphasis(p, out, origData[offset-2:], c)
			if length == 0 {
				return 0
			} else {
				return length - 2
			}
		default:
			// single symbol found, hand over to emph2
			length = helperDoubleEmphasis(p, out, origData[offset-1:], c)
			if length == 0 {
				return 0
			} else {
				return length - 1
			}
		}
	}
	return 0
}