grayfriday @ c23099e5ee07416d22e09d96f1872810d549579c

blackfriday fork with a few changes

markdown.go

//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday

import (
	"bytes"
	"unicode/utf8"
)

const VERSION = "1.1"

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
	EXTENSION_TABLES                        // render tables
	EXTENSION_FENCED_CODE                   // render fenced code blocks
	EXTENSION_AUTOLINK                      // detect embedded URLs that are not explicitly marked
	EXTENSION_STRIKETHROUGH                 // strikethrough text using ~~test~~
	EXTENSION_LAX_HTML_BLOCKS               // loosen up HTML block parsing rules
	EXTENSION_SPACE_HEADERS                 // be strict about prefix header rules
	EXTENSION_HARD_LINE_BREAK               // translate newlines into line breaks
	EXTENSION_TAB_SIZE_EIGHT                // expand tabs to eight spaces instead of four
	EXTENSION_FOOTNOTES                     // Pandoc-style footnotes
)

// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	LIST_ITEM_END_OF_LIST
)

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TAB_SIZE_EIGHT   = 8
)

// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
	"p":          true,
	"dl":         true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"ol":         true,
	"ul":         true,
	"del":        true,
	"div":        true,
	"ins":        true,
	"pre":        true,
	"form":       true,
	"math":       true,
	"table":      true,
	"iframe":     true,
	"script":     true,
	"fieldset":   true,
	"noscript":   true,
	"blockquote": true,

	// HTML5
	"video":      true,
	"aside":      true,
	"canvas":     true,
	"figure":     true,
	"footer":     true,
	"header":     true,
	"hgroup":     true,
	"output":     true,
	"article":    true,
	"section":    true,
	"progress":   true,
	"figcaption": true,
}

// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided; see HtmlRenderer and LatexRenderer.
type Renderer interface {
	// block-level callbacks
	BlockCode(out *bytes.Buffer, text []byte, lang string)
	BlockQuote(out *bytes.Buffer, text []byte)
	BlockHtml(out *bytes.Buffer, text []byte)
	Header(out *bytes.Buffer, text func() bool, level int)
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)

	// Span-level callbacks
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	CodeSpan(out *bytes.Buffer, text []byte)
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	Emphasis(out *bytes.Buffer, text []byte)
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	LineBreak(out *bytes.Buffer)
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	TripleEmphasis(out *bytes.Buffer, text []byte)
	StrikeThrough(out *bytes.Buffer, text []byte)
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)

	// Low-level callbacks
	Entity(out *bytes.Buffer, entity []byte)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)
}
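
// The following is a hypothetical sketch, not part of the upstream package: the
// usual way to customize output is to embed an existing Renderer and override
// only the callbacks of interest. Here NormalText is overridden to upper-case
// plain text; the embedded Renderer supplies every other method. A caller could
// then pass, e.g., upcaseRenderer{HtmlRenderer(HTML_USE_XHTML, "", "")} to
// Markdown.
type upcaseRenderer struct {
	Renderer
}

func (r upcaseRenderer) NormalText(out *bytes.Buffer, text []byte) {
	out.Write(bytes.ToUpper(text))
}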

// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int

// parser holds the runtime state used during parsing.
// It is constructed by the Markdown function.
type parser struct {
	r              Renderer
	refs           map[string]*reference
	inlineCallback [256]inlineParser
	flags          int
	nesting        int
	maxNesting     int
	insideLink     bool

	// Footnotes need to be ordered as well as available to quickly check for
	// presence. If a ref is also a footnote, it's stored both in refs and here
	// in notes. Slice is nil if footnotes not enabled.
	notes []*reference
}

//
//
// Public interface
//
//

// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := HTML_USE_XHTML
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0

	return Markdown(input, renderer, extensions)
}

// MarkdownCommon is a convenience function for simple rendering.
// It processes markdown input with common extensions enabled, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
func MarkdownCommon(input []byte) []byte {
	// set up the HTML renderer
	htmlFlags := 0
	htmlFlags |= HTML_USE_XHTML
	htmlFlags |= HTML_USE_SMARTYPANTS
	htmlFlags |= HTML_SMARTYPANTS_FRACTIONS
	htmlFlags |= HTML_SMARTYPANTS_LATEX_DASHES
	htmlFlags |= HTML_SKIP_SCRIPT
	renderer := HtmlRenderer(htmlFlags, "", "")

	// set up the parser
	extensions := 0
	extensions |= EXTENSION_NO_INTRA_EMPHASIS
	extensions |= EXTENSION_TABLES
	extensions |= EXTENSION_FENCED_CODE
	extensions |= EXTENSION_AUTOLINK
	extensions |= EXTENSION_STRIKETHROUGH
	extensions |= EXTENSION_SPACE_HEADERS

	return Markdown(input, renderer, extensions)
}
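
// exampleMarkdownCommonUsage is a hypothetical sketch, not part of the upstream
// package: it shows the typical one-call use of MarkdownCommon from client
// code, turning a markdown fragment into HTML with the common extensions above.
func exampleMarkdownCommonUsage() []byte {
	input := []byte("Hello, *markdown* ~~world~~!\n")
	return MarkdownCommon(input)
}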

// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
	// no point in parsing if we can't render
	if renderer == nil {
		return nil
	}

	// fill in the render structure
	p := new(parser)
	p.r = renderer
	p.flags = extensions
	p.refs = make(map[string]*reference)
	p.maxNesting = 16
	p.insideLink = false

	// register inline parsers
	p.inlineCallback['*'] = emphasis
	p.inlineCallback['_'] = emphasis
	if extensions&EXTENSION_STRIKETHROUGH != 0 {
		p.inlineCallback['~'] = emphasis
	}
	p.inlineCallback['`'] = codeSpan
	p.inlineCallback['\n'] = lineBreak
	p.inlineCallback['['] = link
	p.inlineCallback['<'] = leftAngle
	p.inlineCallback['\\'] = escape
	p.inlineCallback['&'] = entity

	if extensions&EXTENSION_AUTOLINK != 0 {
		p.inlineCallback[':'] = autoLink
	}

	if extensions&EXTENSION_FOOTNOTES != 0 {
		p.notes = make([]*reference, 0)
	}

	first := firstPass(p, input)
	second := secondPass(p, first)

	return second
}
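
// exampleCustomMarkdown is a hypothetical sketch, not part of the upstream
// package: it shows how a caller can pair any Renderer with an explicit set of
// extension flags, ORed together, instead of using the convenience wrappers.
func exampleCustomMarkdown(input []byte) []byte {
	renderer := HtmlRenderer(HTML_USE_XHTML, "", "")
	extensions := EXTENSION_TABLES | EXTENSION_FENCED_CODE | EXTENSION_AUTOLINK
	return Markdown(input, renderer, extensions)
}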

// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
func firstPass(p *parser, input []byte) []byte {
	var out bytes.Buffer
	tabSize := TAB_SIZE_DEFAULT
	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
		tabSize = TAB_SIZE_EIGHT
	}
	beg, end := 0, 0
	for beg < len(input) { // iterate over lines
		if end = isReference(p, input[beg:], tabSize); end > 0 {
			beg += end
		} else { // skip to the next line
			end = beg
			for end < len(input) && input[end] != '\n' && input[end] != '\r' {
				end++
			}

			// add the line body if present
			if end > beg {
				expandTabs(&out, input[beg:end], tabSize)
			}
			out.WriteByte('\n')

			if end < len(input) && input[end] == '\r' {
				end++
			}
			if end < len(input) && input[end] == '\n' {
				end++
			}

			beg = end
		}
	}

	// empty input?
	if out.Len() == 0 {
		out.WriteByte('\n')
	}

	return out.Bytes()
}

// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
	var output bytes.Buffer

	p.r.DocumentHeader(&output)
	p.block(&output, input)

	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
		p.r.Footnotes(&output, func() bool {
			flags := LIST_ITEM_BEGINNING_OF_LIST
			for _, ref := range p.notes {
				var buf bytes.Buffer
				if ref.hasBlock {
					flags |= LIST_ITEM_CONTAINS_BLOCK
					p.block(&buf, ref.title)
				} else {
					p.inline(&buf, ref.title)
				}
				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
			}

			return true
		})
	}

	p.r.DocumentFooter(&output)

	if p.nesting != 0 {
		panic("Nesting level did not end at zero")
	}

	return output.Bytes()
}

//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
//    [1]: http://www.google.com/ "Google"
//    [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
//    This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
//    This sentence needs a bit of further explanation.[^note]
//
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
//    Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.

// References are parsed and stored in this struct.
type reference struct {
	link     []byte
	title    []byte
	noteId   int // 0 if not a footnote ref
	hasBlock bool
}
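
// exampleFootnoteInput is a hypothetical sketch, not part of the upstream
// package: with EXTENSION_FOOTNOTES enabled, a document like the one below is
// parsed by the reference machinery in this section, and the note body is
// rendered at the end of the document.
func exampleFootnoteInput() []byte {
	input := []byte("This sentence needs a bit of further explanation.[^note]\n\n" +
		"[^note]: This is the explanation.\n")
	renderer := HtmlRenderer(HTML_USE_XHTML, "", "")
	return Markdown(input, renderer, EXTENSION_FOOTNOTES)
}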

// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
	// up to 3 optional leading spaces
	if len(data) < 4 {
		return 0
	}
	i := 0
	for i < 3 && data[i] == ' ' {
		i++
	}

	noteId := 0

	// id part: anything but a newline between brackets
	if data[i] != '[' {
		return 0
	}
	i++
	if p.flags&EXTENSION_FOOTNOTES != 0 {
		if i < len(data) && data[i] == '^' {
			// we can set it to anything here because the proper noteIds will
			// be assigned later during the second pass. It just has to be != 0
			noteId = 1
			i++
		}
	}
	idOffset := i
	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
		i++
	}
	if i >= len(data) || data[i] != ']' {
		return 0
	}
	idEnd := i

	// spacer: colon (space | tab)* newline? (space | tab)*
	i++
	if i >= len(data) || data[i] != ':' {
		return 0
	}
	i++
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
		i++
		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
			i++
		}
	}
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i >= len(data) {
		return 0
	}

	var (
		linkOffset, linkEnd   int
		titleOffset, titleEnd int
		lineEnd               int
		raw                   []byte
		hasBlock              bool
	)

	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
		lineEnd = linkEnd
	} else {
		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
	}
	if lineEnd == 0 {
		return 0
	}

	// a valid ref has been found

	ref := &reference{
		noteId:   noteId,
		hasBlock: hasBlock,
	}

	if noteId > 0 {
		// reusing the link field for the id since footnotes don't have links
		ref.link = data[idOffset:idEnd]
		// if footnote, it's not really a title, it's the contained text
		ref.title = raw
	} else {
		ref.link = data[linkOffset:linkEnd]
		ref.title = data[titleOffset:titleEnd]
	}

	// id matches are case-insensitive
	id := string(bytes.ToLower(data[idOffset:idEnd]))
	p.refs[id] = ref
	return lineEnd
}

func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
	// link: whitespace-free sequence, optionally between angle brackets
	if data[i] == '<' {
		i++
	}
	linkOffset = i
	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
		i++
	}
	linkEnd = i
	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
		linkOffset++
		linkEnd--
	}

	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
		i++
	}
	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
		return
	}

	// compute end-of-line
	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
		lineEnd = i
	}
	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
		lineEnd++
	}

	// optional (space|tab)* spacer after a newline
	if lineEnd > 0 {
		i = lineEnd + 1
		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
			i++
		}
	}

	// optional title: any non-newline sequence enclosed in '"() alone on its line
	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
		i++
		titleOffset = i

		// look for EOL
		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
			i++
		}
		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
			titleEnd = i + 1
		} else {
			titleEnd = i
		}

		// step back
		i--
		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
			i--
		}
		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
			lineEnd = titleEnd
			titleEnd = i
		}
	}

	return
}

// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
	if i == 0 {
		return
	}

	// skip leading whitespace on first line
	for data[i] == ' ' {
		i++
	}

	blockStart = i

	// find the end of the line
	blockEnd = i
	for i < len(data) && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[blockEnd:i])
	blockEnd = i

	// process the following lines
	containsBlankLine := false

gatherLines:
	for blockEnd < len(data) {
		i++

		// find the end of this line
		for i < len(data) && data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[blockEnd:i]) > 0 {
			containsBlankLine = true
			blockEnd = i
			continue
		}

		n := 0
		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
			// this is the end of the block.
			// we don't want to include this last line in the index.
			break gatherLines
		}

		// if there were blank lines before this one, insert a new one now
		if containsBlankLine {
			raw.WriteByte('\n')
			containsBlankLine = false
		}

		// get rid of that first tab, write to buffer
		raw.Write(data[blockEnd+n : i])
		hasBlock = true

		blockEnd = i
	}

	if data[blockEnd-1] != '\n' {
		raw.WriteByte('\n')
	}

	contents = raw.Bytes()

	return
}

//
//
// Miscellaneous helper functions
//
//

// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// The trailing newline is appended by the caller (firstPass), not here.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
	// first, check for common cases: no tabs, or only tabs at beginning of line
	i, prefix := 0, 0
	slowcase := false
	for i = 0; i < len(line); i++ {
		if line[i] == '\t' {
			if prefix == i {
				prefix++
			} else {
				slowcase = true
				break
			}
		}
	}

	// no need to decode runes if all tabs are at the beginning of the line
	if !slowcase {
		for i = 0; i < prefix*tabSize; i++ {
			out.WriteByte(' ')
		}
		out.Write(line[prefix:])
		return
	}

	// the slow case: we need to count runes to figure out how
	// many spaces to insert for each tab
	column := 0
	i = 0
	for i < len(line) {
		start := i
		for i < len(line) && line[i] != '\t' {
			_, size := utf8.DecodeRune(line[i:])
			i += size
			column++
		}

		if i > start {
			out.Write(line[start:i])
		}

		if i >= len(line) {
			break
		}

		for {
			out.WriteByte(' ')
			column++
			if column%tabSize == 0 {
				break
			}
		}

		i++
	}
}
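
// exampleExpandTabs is a hypothetical sketch, not part of the upstream package:
// it illustrates that expandTabs aligns to the next tab stop rather than
// inserting a fixed number of spaces, so "ab\tcd" becomes "ab  cd" with the
// default four-column tab size (two spaces reach column 4).
func exampleExpandTabs() string {
	var out bytes.Buffer
	expandTabs(&out, []byte("ab\tcd"), TAB_SIZE_DEFAULT)
	return out.String() // "ab  cd"
}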

// Check whether a line counts as indented.
// Returns the number of characters in the indent (0 = not indented).
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}

// Create a URL-safe slug for fragments.
func slugify(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	out := make([]byte, 0, len(in))
	sym := false

	for _, ch := range in {
		if isalnum(ch) {
			sym = false
			out = append(out, ch)
		} else if sym {
			continue
		} else {
			out = append(out, '-')
			sym = true
		}
	}
	var a, b int
	var ch byte
	for a, ch = range out {
		if ch != '-' {
			break
		}
	}
	for b = len(out) - 1; b > 0; b-- {
		if out[b] != '-' {
			break
		}
	}
	return out[a : b+1]
}
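
// exampleSlugify is a hypothetical sketch, not part of the upstream package: it
// shows how slugify collapses each run of non-alphanumeric bytes into a single
// '-' and trims leading and trailing dashes, e.g. "Header: one, two" becomes
// "Header-one-two".
func exampleSlugify() []byte {
	return slugify([]byte("Header: one, two"))
}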