diff --git a/lib/marked.esm.js b/lib/marked.esm.js index 40d25560da..50858ef6d9 100644 --- a/lib/marked.esm.js +++ b/lib/marked.esm.js @@ -9,9 +9,13 @@ * The code in this file is generated from files in ./src/ */ -var defaults$5 = {exports: {}}; +function createCommonjsModule(fn) { + var module = { exports: {} }; + return fn(module, module.exports), module.exports; +} -function getDefaults$1() { +var defaults = createCommonjsModule(function (module) { +function getDefaults() { return { baseUrl: null, breaks: false, @@ -35,20 +39,20 @@ function getDefaults$1() { }; } -function changeDefaults$1(newDefaults) { - defaults$5.exports.defaults = newDefaults; +function changeDefaults(newDefaults) { + module.exports.defaults = newDefaults; } -defaults$5.exports = { - defaults: getDefaults$1(), - getDefaults: getDefaults$1, - changeDefaults: changeDefaults$1 +module.exports = { + defaults: getDefaults(), + getDefaults, + changeDefaults }; +}); /** * Helpers */ - const escapeTest = /[&<>"']/; const escapeReplace = /[&<>"']/g; const escapeTestNoEncode = /[<>"']|&(?!#?\w+;)/; @@ -61,7 +65,7 @@ const escapeReplacements = { "'": ''' }; const getEscapeReplacement = (ch) => escapeReplacements[ch]; -function escape$3(html, encode) { +function escape(html, encode) { if (encode) { if (escapeTest.test(html)) { return html.replace(escapeReplace, getEscapeReplacement); @@ -77,7 +81,7 @@ function escape$3(html, encode) { const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig; -function unescape$1(html) { +function unescape(html) { // explicitly match decimal, hex, and named HTML entities return html.replace(unescapeTest, (_, n) => { n = n.toLowerCase(); @@ -92,7 +96,7 @@ function unescape$1(html) { } const caret = /(^|[^\[])\^/g; -function edit$1(regex, opt) { +function edit(regex, opt) { regex = regex.source || regex; opt = opt || ''; const obj = { @@ -111,11 +115,11 @@ function edit$1(regex, opt) { const nonWordAndColonTest = /[^\w:]/g; const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i; -function cleanUrl$1(sanitize, base, href) { +function cleanUrl(sanitize, base, href) { if (sanitize) { let prot; try { - prot = decodeURIComponent(unescape$1(href)) + prot = decodeURIComponent(unescape(href)) .replace(nonWordAndColonTest, '') .toLowerCase(); } catch (e) { @@ -149,7 +153,7 @@ function resolveUrl(base, href) { if (justDomain.test(base)) { baseUrls[' ' + base] = base + '/'; } else { - baseUrls[' ' + base] = rtrim$1(base, '/', true); + baseUrls[' ' + base] = rtrim(base, '/', true); } } base = baseUrls[' ' + base]; @@ -170,9 +174,9 @@ function resolveUrl(base, href) { } } -const noopTest$1 = { exec: function noopTest() {} }; +const noopTest = { exec: function noopTest() {} }; -function merge$2(obj) { +function merge(obj) { let i = 1, target, key; @@ -189,7 +193,7 @@ function merge$2(obj) { return obj; } -function splitCells$1(tableRow, count) { +function splitCells(tableRow, count) { // ensure that every cell-delimiting pipe has a space // before it to distinguish it from an escaped pipe const row = tableRow.replace(/\|/g, (match, offset, str) => { @@ -208,6 +212,10 @@ function splitCells$1(tableRow, count) { cells = row.split(/ \|/); let i = 0; + // First/last cell in a row cannot be empty if it has no leading/trailing pipe + if (!cells[0].trim()) { cells.shift(); } + if (!cells[cells.length - 1].trim()) { cells.pop(); } + if (cells.length > count) { cells.splice(count); } else { @@ -224,7 +232,7 @@ function splitCells$1(tableRow, count) { // Remove trailing 'c's. 
Equivalent to str.replace(/c*$/, ''). // /c*$/ is vulnerable to REDOS. // invert: Remove suffix of non-c chars instead. Default falsey. -function rtrim$1(str, c, invert) { +function rtrim(str, c, invert) { const l = str.length; if (l === 0) { return ''; @@ -248,7 +256,7 @@ function rtrim$1(str, c, invert) { return str.substr(0, l - suffLen); } -function findClosingBracket$1(str, b) { +function findClosingBracket(str, b) { if (str.indexOf(b[1]) === -1) { return -1; } @@ -270,14 +278,14 @@ function findClosingBracket$1(str, b) { return -1; } -function checkSanitizeDeprecation$1(opt) { +function checkSanitizeDeprecation(opt) { if (opt && opt.sanitize && !opt.silent) { console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options'); } } // copied from https://stackoverflow.com/a/5450113/806777 -function repeatString$1(pattern, count) { +function repeatString(pattern, count) { if (count < 1) { return ''; } @@ -293,40 +301,42 @@ function repeatString$1(pattern, count) { } var helpers = { - escape: escape$3, - unescape: unescape$1, - edit: edit$1, - cleanUrl: cleanUrl$1, + escape, + unescape, + edit, + cleanUrl, resolveUrl, - noopTest: noopTest$1, - merge: merge$2, - splitCells: splitCells$1, - rtrim: rtrim$1, - findClosingBracket: findClosingBracket$1, - checkSanitizeDeprecation: checkSanitizeDeprecation$1, - repeatString: repeatString$1 + noopTest, + merge, + splitCells, + rtrim, + findClosingBracket, + checkSanitizeDeprecation, + repeatString }; -const { defaults: defaults$4 } = defaults$5.exports; +const { defaults: defaults$1 } = defaults; const { - rtrim, - splitCells, - escape: escape$2, - findClosingBracket + rtrim: rtrim$1, + splitCells: splitCells$1, + escape: escape$1, + findClosingBracket: findClosingBracket$1 } = helpers; -function outputLink(cap, link, raw) { +function outputLink(cap, link, raw, lexer) { const href = link.href; - const title = link.title ? escape$2(link.title) : null; + const title = link.title ? escape$1(link.title) : null; const text = cap[1].replace(/\\([\[\]])/g, '$1'); if (cap[0].charAt(0) !== '!') { + lexer.state.inLink = true; return { type: 'link', raw, href, title, - text + text, + tokens: lexer.inlineTokens(text, []) }; } else { return { @@ -334,7 +344,7 @@ function outputLink(cap, link, raw) { raw, href, title, - text: escape$2(text) + text: escape$1(text) }; } } @@ -372,7 +382,7 @@ function indentCodeCompensation(raw, text) { */ var Tokenizer_1 = class Tokenizer { constructor(options) { - this.options = options || defaults$4; + this.options = options || defaults$1; } space(src) { @@ -397,7 +407,7 @@ var Tokenizer_1 = class Tokenizer { raw: cap[0], codeBlockStyle: 'indented', text: !this.options.pedantic - ? rtrim(text, '\n') + ? 
rtrim$1(text, '\n') : text }; } @@ -425,7 +435,7 @@ var Tokenizer_1 = class Tokenizer { // remove trailing #s if (/#$/.test(text)) { - const trimmed = rtrim(text, '#'); + const trimmed = rtrim$1(text, '#'); if (this.options.pedantic) { text = trimmed.trim(); } else if (!trimmed || / $/.test(trimmed)) { @@ -434,48 +444,15 @@ var Tokenizer_1 = class Tokenizer { } } - return { + const token = { type: 'heading', raw: cap[0], depth: cap[1].length, - text: text + text: text, + tokens: [] }; - } - } - - nptable(src) { - const cap = this.rules.block.nptable.exec(src); - if (cap) { - const item = { - type: 'table', - header: splitCells(cap[1].replace(/^ *| *\| *$/g, '')), - align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */), - cells: cap[3] ? cap[3].replace(/\n$/, '').split('\n') : [], - raw: cap[0] - }; - - if (item.header.length === item.align.length) { - let l = item.align.length; - let i; - for (i = 0; i < l; i++) { - if (/^ *-+: *$/.test(item.align[i])) { - item.align[i] = 'right'; - } else if (/^ *:-+: *$/.test(item.align[i])) { - item.align[i] = 'center'; - } else if (/^ *:-+ *$/.test(item.align[i])) { - item.align[i] = 'left'; - } else { - item.align[i] = null; - } - } - - l = item.cells.length; - for (i = 0; i < l; i++) { - item.cells[i] = splitCells(item.cells[i], item.header.length); - } - - return item; - } + this.lexer.inline(token.text, token.tokens); + return token; } } @@ -497,6 +474,7 @@ var Tokenizer_1 = class Tokenizer { return { type: 'blockquote', raw: cap[0], + tokens: this.lexer.blockTokens(text, []), text }; } @@ -593,7 +571,7 @@ var Tokenizer_1 = class Tokenizer { } // trim item newlines at end - item = rtrim(item, '\n'); + item = rtrim$1(item, '\n'); if (i !== l - 1) { raw = raw + '\n'; } @@ -621,16 +599,28 @@ var Tokenizer_1 = class Tokenizer { } } - list.items.push({ + this.lexer.state.top = false; + + const token = { type: 'list_item', raw, task: istask, checked: ischecked, loose: loose, - text: item - }); + text: item, + tokens: this.lexer.blockTokens(item, []) + }; + + // this.lexer.inline(token.text, ) + list.items.push(token); } + // l2 = token.items.length; + // for (j = 0; j < l2; j++) { + // this.inline(token.items[j].tokens); + // } + // break; + return list; } } @@ -638,15 +628,20 @@ var Tokenizer_1 = class Tokenizer { html(src) { const cap = this.rules.block.html.exec(src); if (cap) { - return { - type: this.options.sanitize - ? 'paragraph' - : 'html', + const token = { + type: 'html', raw: cap[0], pre: !this.options.sanitizer && (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'), - text: this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$2(cap[0])) : cap[0] + text: cap[0] }; + if (this.options.sanitize) { + token.type = 'paragraph'; + token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$1(cap[0]); + token.tokens = []; + this.lexer.inline(token.text, token.tokens); + } + return token; } } @@ -670,16 +665,20 @@ var Tokenizer_1 = class Tokenizer { if (cap) { const item = { type: 'table', - header: splitCells(cap[1].replace(/^ *| *\| *$/g, '')), + header: { + text: splitCells$1(cap[1]) + }, align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */), - cells: cap[3] ? cap[3].replace(/\n$/, '').split('\n') : [] + cells: { + text: cap[3] ? 
cap[3].replace(/\n$/, '').split('\n') : [] + } }; - if (item.header.length === item.align.length) { + if (item.header.text.length === item.align.length) { item.raw = cap[0]; let l = item.align.length; - let i; + let i, j, k, row; for (i = 0; i < l; i++) { if (/^ *-+: *$/.test(item.align[i])) { item.align[i] = 'right'; @@ -692,11 +691,31 @@ var Tokenizer_1 = class Tokenizer { } } - l = item.cells.length; + l = item.cells.text.length; for (i = 0; i < l; i++) { - item.cells[i] = splitCells( - item.cells[i].replace(/^ *\| *| *\| *$/g, ''), - item.header.length); + item.cells.text[i] = splitCells$1(item.cells.text[i], item.header.text.length); + } + + // parse child tokens inside headers and cells + item.header.tokens = []; + item.cells.tokens = []; + + // header child tokens + l = item.header.text.length; + for (j = 0; j < l; j++) { + item.header.tokens[j] = []; + this.lexer.inlineTokens(item.header.text[j], item.header.tokens[j]); + } + + // cell child tokens + l = item.cells.text.length; + for (j = 0; j < l; j++) { + row = item.cells.text[j]; + item.cells.tokens[j] = []; + for (k = 0; k < row.length; k++) { + item.cells.tokens[j][k] = []; + this.lexer.inlineTokens(row[k], item.cells.tokens[j][k]); + } } return item; @@ -707,36 +726,45 @@ var Tokenizer_1 = class Tokenizer { lheading(src) { const cap = this.rules.block.lheading.exec(src); if (cap) { - return { + const token = { type: 'heading', raw: cap[0], depth: cap[2].charAt(0) === '=' ? 1 : 2, - text: cap[1] + text: cap[1], + tokens: [] }; + this.lexer.inline(token.text, token.tokens); + return token; } } paragraph(src) { const cap = this.rules.block.paragraph.exec(src); if (cap) { - return { + const token = { type: 'paragraph', raw: cap[0], text: cap[1].charAt(cap[1].length - 1) === '\n' ? cap[1].slice(0, -1) - : cap[1] + : cap[1], + tokens: [] }; + this.lexer.inline(token.text, token.tokens); + return token; } } text(src) { const cap = this.rules.block.text.exec(src); if (cap) { - return { + const token = { type: 'text', raw: cap[0], - text: cap[0] + text: cap[0], + tokens: [] }; + this.lexer.inline(token.text, token.tokens); + return token; } } @@ -746,23 +774,23 @@ var Tokenizer_1 = class Tokenizer { return { type: 'escape', raw: cap[0], - text: escape$2(cap[1]) + text: escape$1(cap[1]) }; } } - tag(src, inLink, inRawBlock) { + tag(src) { const cap = this.rules.inline.tag.exec(src); if (cap) { - if (!inLink && /^/i.test(cap[0])) { - inLink = false; + if (!this.lexer.state.inLink && /^/i.test(cap[0])) { + this.lexer.state.inLink = false; } - if (!inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { - inRawBlock = true; - } else if (inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { - inRawBlock = false; + if (!this.lexer.state.inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { + this.lexer.state.inRawBlock = true; + } else if (this.lexer.state.inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { + this.lexer.state.inRawBlock = false; } return { @@ -770,12 +798,12 @@ var Tokenizer_1 = class Tokenizer { ? 'text' : 'html', raw: cap[0], - inLink, - inRawBlock, + inLink: this.lexer.state.inLink, + inRawBlock: this.lexer.state.inRawBlock, text: this.options.sanitize ? (this.options.sanitizer ? 
this.options.sanitizer(cap[0]) - : escape$2(cap[0])) + : escape$1(cap[0])) : cap[0] }; } @@ -792,13 +820,13 @@ var Tokenizer_1 = class Tokenizer { } // ending angle bracket cannot be escaped - const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\'); + const rtrimSlash = rtrim$1(trimmedUrl.slice(0, -1), '\\'); if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) { return; } } else { // find closing parenthesis - const lastParenIndex = findClosingBracket(cap[2], '()'); + const lastParenIndex = findClosingBracket$1(cap[2], '()'); if (lastParenIndex > -1) { const start = cap[0].indexOf('!') === 0 ? 5 : 4; const linkLen = start + cap[1].length + lastParenIndex; @@ -833,7 +861,7 @@ var Tokenizer_1 = class Tokenizer { return outputLink(cap, { href: href ? href.replace(this.rules.inline._escapes, '$1') : href, title: title ? title.replace(this.rules.inline._escapes, '$1') : title - }, cap[0]); + }, cap[0], this.lexer); } } @@ -851,7 +879,7 @@ var Tokenizer_1 = class Tokenizer { text }; } - return outputLink(cap, link, cap[0]); + return outputLink(cap, link, cap[0], this.lexer); } } @@ -900,18 +928,22 @@ var Tokenizer_1 = class Tokenizer { // Create `em` if smallest delimiter has odd char count. *a*** if (Math.min(lLength, rLength) % 2) { + const text = src.slice(1, lLength + match.index + rLength); return { type: 'em', raw: src.slice(0, lLength + match.index + rLength + 1), - text: src.slice(1, lLength + match.index + rLength) + text, + tokens: this.lexer.inlineTokens(text, []) }; } // Create 'strong' if smallest delimiter has even char count. **a*** + const text = src.slice(2, lLength + match.index + rLength - 1); return { type: 'strong', raw: src.slice(0, lLength + match.index + rLength + 1), - text: src.slice(2, lLength + match.index + rLength - 1) + text, + tokens: this.lexer.inlineTokens(text, []) }; } } @@ -926,7 +958,7 @@ var Tokenizer_1 = class Tokenizer { if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) { text = text.substring(1, text.length - 1); } - text = escape$2(text, true); + text = escape$1(text, true); return { type: 'codespan', raw: cap[0], @@ -951,7 +983,8 @@ var Tokenizer_1 = class Tokenizer { return { type: 'del', raw: cap[0], - text: cap[2] + text: cap[2], + tokens: this.lexer.inlineTokens(cap[2], []) }; } } @@ -961,10 +994,10 @@ var Tokenizer_1 = class Tokenizer { if (cap) { let text, href; if (cap[2] === '@') { - text = escape$2(this.options.mangle ? mangle(cap[1]) : cap[1]); + text = escape$1(this.options.mangle ? mangle(cap[1]) : cap[1]); href = 'mailto:' + text; } else { - text = escape$2(cap[1]); + text = escape$1(cap[1]); href = text; } @@ -989,7 +1022,7 @@ var Tokenizer_1 = class Tokenizer { if (cap = this.rules.inline.url.exec(src)) { let text, href; if (cap[2] === '@') { - text = escape$2(this.options.mangle ? mangle(cap[0]) : cap[0]); + text = escape$1(this.options.mangle ? mangle(cap[0]) : cap[0]); href = 'mailto:' + text; } else { // do extended autolink path validation @@ -998,7 +1031,7 @@ var Tokenizer_1 = class Tokenizer { prevCapZero = cap[0]; cap[0] = this.rules.inline._backpedal.exec(cap[0])[0]; } while (prevCapZero !== cap[0]); - text = escape$2(cap[0]); + text = escape$1(cap[0]); if (cap[1] === 'www.') { href = 'http://' + text; } else { @@ -1021,14 +1054,14 @@ var Tokenizer_1 = class Tokenizer { } } - inlineText(src, inRawBlock, smartypants) { + inlineText(src, smartypants) { const cap = this.rules.inline.text.exec(src); if (cap) { let text; - if (inRawBlock) { - text = this.options.sanitize ? (this.options.sanitizer ? 
this.options.sanitizer(cap[0]) : escape$2(cap[0])) : cap[0]; + if (this.lexer.state.inRawBlock) { + text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$1(cap[0])) : cap[0]; } else { - text = escape$2(this.options.smartypants ? smartypants(cap[0]) : cap[0]); + text = escape$1(this.options.smartypants ? smartypants(cap[0]) : cap[0]); } return { type: 'text', @@ -1040,15 +1073,15 @@ var Tokenizer_1 = class Tokenizer { }; const { - noopTest, - edit, + noopTest: noopTest$1, + edit: edit$1, merge: merge$1 } = helpers; /** * Block-Level Grammar */ -const block$1 = { +const block = { newline: /^(?: *(?:\n|$))+/, code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/, fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?:\n+|$)|$)/, @@ -1067,8 +1100,7 @@ const block$1 = { + '|(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag + ')', def: /^ {0,3}\[(label)\]: *\n? *]+)>?(?:(?: +\n? *| *\n *)(title))? *(?:\n+|$)/, - nptable: noopTest, - table: noopTest, + table: noopTest$1, lheading: /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/, // regex template, placeholders will be replaced according to different paragraph // interruption rules of commonmark and the original markdown spec: @@ -1076,108 +1108,94 @@ const block$1 = { text: /^[^\n]+/ }; -block$1._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/; -block$1._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/; -block$1.def = edit(block$1.def) - .replace('label', block$1._label) - .replace('title', block$1._title) +block._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/; +block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/; +block.def = edit$1(block.def) + .replace('label', block._label) + .replace('title', block._title) .getRegex(); -block$1.bullet = /(?:[*+-]|\d{1,9}[.)])/; -block$1.item = /^( *)(bull) ?[^\n]*(?:\n(?! *bull ?)[^\n]*)*/; -block$1.item = edit(block$1.item, 'gm') - .replace(/bull/g, block$1.bullet) +block.bullet = /(?:[*+-]|\d{1,9}[.)])/; +block.item = /^( *)(bull) ?[^\n]*(?:\n(?! 
*bull ?)[^\n]*)*/; +block.item = edit$1(block.item, 'gm') + .replace(/bull/g, block.bullet) .getRegex(); -block$1.listItemStart = edit(/^( *)(bull) */) - .replace('bull', block$1.bullet) +block.listItemStart = edit$1(/^( *)(bull) */) + .replace('bull', block.bullet) .getRegex(); -block$1.list = edit(block$1.list) - .replace(/bull/g, block$1.bullet) +block.list = edit$1(block.list) + .replace(/bull/g, block.bullet) .replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))') - .replace('def', '\\n+(?=' + block$1.def.source + ')') + .replace('def', '\\n+(?=' + block.def.source + ')') .getRegex(); -block$1._tag = 'address|article|aside|base|basefont|blockquote|body|caption' +block._tag = 'address|article|aside|base|basefont|blockquote|body|caption' + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption' + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe' + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option' + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr' + '|track|ul'; -block$1._comment = /|$)/; -block$1.html = edit(block$1.html, 'i') - .replace('comment', block$1._comment) - .replace('tag', block$1._tag) +block._comment = /|$)/; +block.html = edit$1(block.html, 'i') + .replace('comment', block._comment) + .replace('tag', block._tag) .replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/) .getRegex(); -block$1.paragraph = edit(block$1._paragraph) - .replace('hr', block$1.hr) +block.paragraph = edit$1(block._paragraph) + .replace('hr', block.hr) .replace('heading', ' {0,3}#{1,6} ') .replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs .replace('blockquote', ' {0,3}>') .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n') .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt .replace('html', ')|<(?:script|pre|style|textarea|!--)') - .replace('tag', block$1._tag) // pars can be interrupted by type (6) html blocks + .replace('tag', block._tag) // pars can be interrupted by type (6) html blocks .getRegex(); -block$1.blockquote = edit(block$1.blockquote) - .replace('paragraph', block$1.paragraph) +block.blockquote = edit$1(block.blockquote) + .replace('paragraph', block.paragraph) .getRegex(); /** * Normal Block Grammar */ -block$1.normal = merge$1({}, block$1); +block.normal = merge$1({}, block); /** * GFM Block Grammar */ -block$1.gfm = merge$1({}, block$1.normal, { - nptable: '^ *([^|\\n ].*\\|.*)\\n' // Header - + ' {0,3}([-:]+ *\\|[-| :]*)' // Align - + '(?:\\n((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)', // Cells - table: '^ *\\|(.+)\\n' // Header - + ' {0,3}\\|?( *[-:]+[-| :]*)' // Align +block.gfm = merge$1({}, block.normal, { + table: '^ *([^\\n ].*\\|.*)\\n' // Header + + ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)\\|?' 
// Align + '(?:\\n *((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells }); -block$1.gfm.nptable = edit(block$1.gfm.nptable) - .replace('hr', block$1.hr) +block.gfm.table = edit$1(block.gfm.table) + .replace('hr', block.hr) .replace('heading', ' {0,3}#{1,6} ') .replace('blockquote', ' {0,3}>') .replace('code', ' {4}[^\\n]') .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n') .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt .replace('html', ')|<(?:script|pre|style|textarea|!--)') - .replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks - .getRegex(); - -block$1.gfm.table = edit(block$1.gfm.table) - .replace('hr', block$1.hr) - .replace('heading', ' {0,3}#{1,6} ') - .replace('blockquote', ' {0,3}>') - .replace('code', ' {4}[^\\n]') - .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n') - .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt - .replace('html', ')|<(?:script|pre|style|textarea|!--)') - .replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks + .replace('tag', block._tag) // tables can be interrupted by type (6) html blocks .getRegex(); /** * Pedantic grammar (original John Gruber's loose markdown specification) */ -block$1.pedantic = merge$1({}, block$1.normal, { - html: edit( +block.pedantic = merge$1({}, block.normal, { + html: edit$1( '^ *(?:comment *(?:\\n|\\s*$)' + '|<(tag)[\\s\\S]+? *(?:\\n{2,}|\\s*$)' // closed tag + '|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))') - .replace('comment', block$1._comment) + .replace('comment', block._comment) .replace(/tag/g, '(?!(?:' + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub' + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)' @@ -1185,11 +1203,11 @@ block$1.pedantic = merge$1({}, block$1.normal, { .getRegex(), def: /^ *\[([^\]]+)\]: *]+)>?(?: +(["(][^\n]+[")]))? 
*(?:\n+|$)/, heading: /^(#{1,6})(.*)(?:\n+|$)/, - fences: noopTest, // fences not supported - paragraph: edit(block$1.normal._paragraph) - .replace('hr', block$1.hr) + fences: noopTest$1, // fences not supported + paragraph: edit$1(block.normal._paragraph) + .replace('hr', block.hr) .replace('heading', ' *#{1,6} *[^\n]') - .replace('lheading', block$1.lheading) + .replace('lheading', block.lheading) .replace('blockquote', ' {0,3}>') .replace('|fences', '') .replace('|list', '') @@ -1200,10 +1218,10 @@ block$1.pedantic = merge$1({}, block$1.normal, { /** * Inline-Level Grammar */ -const inline$1 = { +const inline = { escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/, autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/, - url: noopTest, + url: noopTest$1, tag: '^comment' + '|^' // self-closing tag + '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag @@ -1223,80 +1241,80 @@ const inline$1 = { }, code: /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/, br: /^( {2,}|\\)\n(?!\s*$)/, - del: noopTest, + del: noopTest$1, text: /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\?@\\[\\]`^{|}~'; -inline$1.punctuation = edit(inline$1.punctuation).replace(/punctuation/g, inline$1._punctuation).getRegex(); +inline._punctuation = '!"#$%&\'()+\\-.,/:;<=>?@\\[\\]`^{|}~'; +inline.punctuation = edit$1(inline.punctuation).replace(/punctuation/g, inline._punctuation).getRegex(); // sequences em should skip over [title](link), `code`, -inline$1.blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g; -inline$1.escapedEmSt = /\\\*|\\_/g; +inline.blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g; +inline.escapedEmSt = /\\\*|\\_/g; -inline$1._comment = edit(block$1._comment).replace('(?:-->|$)', '-->').getRegex(); +inline._comment = edit$1(block._comment).replace('(?:-->|$)', '-->').getRegex(); -inline$1.emStrong.lDelim = edit(inline$1.emStrong.lDelim) - .replace(/punct/g, inline$1._punctuation) +inline.emStrong.lDelim = edit$1(inline.emStrong.lDelim) + .replace(/punct/g, inline._punctuation) .getRegex(); -inline$1.emStrong.rDelimAst = edit(inline$1.emStrong.rDelimAst, 'g') - .replace(/punct/g, inline$1._punctuation) +inline.emStrong.rDelimAst = edit$1(inline.emStrong.rDelimAst, 'g') + .replace(/punct/g, inline._punctuation) .getRegex(); -inline$1.emStrong.rDelimUnd = edit(inline$1.emStrong.rDelimUnd, 'g') - .replace(/punct/g, inline$1._punctuation) +inline.emStrong.rDelimUnd = edit$1(inline.emStrong.rDelimUnd, 'g') + .replace(/punct/g, inline._punctuation) .getRegex(); -inline$1._escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g; +inline._escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g; -inline$1._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/; -inline$1._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/; -inline$1.autolink = edit(inline$1.autolink) - .replace('scheme', inline$1._scheme) - .replace('email', inline$1._email) +inline._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/; +inline._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/; +inline.autolink = edit$1(inline.autolink) + .replace('scheme', inline._scheme) + .replace('email', inline._email) .getRegex(); -inline$1._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/; +inline._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/; -inline$1.tag = edit(inline$1.tag) - .replace('comment', 
inline$1._comment) - .replace('attribute', inline$1._attribute) +inline.tag = edit$1(inline.tag) + .replace('comment', inline._comment) + .replace('attribute', inline._attribute) .getRegex(); -inline$1._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/; -inline$1._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/; -inline$1._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/; +inline._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/; +inline._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/; +inline._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/; -inline$1.link = edit(inline$1.link) - .replace('label', inline$1._label) - .replace('href', inline$1._href) - .replace('title', inline$1._title) +inline.link = edit$1(inline.link) + .replace('label', inline._label) + .replace('href', inline._href) + .replace('title', inline._title) .getRegex(); -inline$1.reflink = edit(inline$1.reflink) - .replace('label', inline$1._label) +inline.reflink = edit$1(inline.reflink) + .replace('label', inline._label) .getRegex(); -inline$1.reflinkSearch = edit(inline$1.reflinkSearch, 'g') - .replace('reflink', inline$1.reflink) - .replace('nolink', inline$1.nolink) +inline.reflinkSearch = edit$1(inline.reflinkSearch, 'g') + .replace('reflink', inline.reflink) + .replace('nolink', inline.nolink) .getRegex(); /** * Normal Inline Grammar */ -inline$1.normal = merge$1({}, inline$1); +inline.normal = merge$1({}, inline); /** * Pedantic Inline Grammar */ -inline$1.pedantic = merge$1({}, inline$1.normal, { +inline.pedantic = merge$1({}, inline.normal, { strong: { start: /^__|\*\*/, middle: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/, @@ -1309,11 +1327,11 @@ inline$1.pedantic = merge$1({}, inline$1.normal, { endAst: /\*(?!\*)/g, endUnd: /_(?!_)/g }, - link: edit(/^!?\[(label)\]\((.*?)\)/) - .replace('label', inline$1._label) + link: edit$1(/^!?\[(label)\]\((.*?)\)/) + .replace('label', inline._label) .getRegex(), - reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/) - .replace('label', inline$1._label) + reflink: edit$1(/^!?\[(label)\]\s*\[([^\]]*)\]/) + .replace('label', inline._label) .getRegex() }); @@ -1321,8 +1339,8 @@ inline$1.pedantic = merge$1({}, inline$1.normal, { * GFM Inline Grammar */ -inline$1.gfm = merge$1({}, inline$1.normal, { - escape: edit(inline$1.escape).replace('])', '~|])').getRegex(), +inline.gfm = merge$1({}, inline.normal, { + escape: edit$1(inline.escape).replace('])', '~|])').getRegex(), _extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/, url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/, _backpedal: /(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/, @@ -1330,30 +1348,29 @@ inline$1.gfm = merge$1({}, inline$1.normal, { text: /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\ { - if (token = extTokenizer.call(this, src, tokens)) { + if (token = extTokenizer.call({ lexer: this }, src, tokens)) { src = src.substring(token.raw.length); tokens.push(token); return true; @@ -1508,6 +1535,8 @@ var Lexer_1 = class Lexer { if (lastToken && lastToken.type === 'paragraph') { lastToken.raw += '\n' + token.raw; lastToken.text += '\n' + token.text; + this.inlineQueue.pop(); + this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text; } else { tokens.push(token); } @@ -1528,13 +1557,6 @@ var Lexer_1 = class Lexer { continue; } - // table no leading pipe (gfm) - if (token = this.tokenizer.nptable(src)) { - 
src = src.substring(token.raw.length); - tokens.push(token); - continue; - } - // hr if (token = this.tokenizer.hr(src)) { src = src.substring(token.raw.length); @@ -1545,7 +1567,6 @@ var Lexer_1 = class Lexer { // blockquote if (token = this.tokenizer.blockquote(src)) { src = src.substring(token.raw.length); - token.tokens = this.blockTokens(token.text, [], top); tokens.push(token); continue; } @@ -1553,10 +1574,6 @@ var Lexer_1 = class Lexer { // list if (token = this.tokenizer.list(src)) { src = src.substring(token.raw.length); - l = token.items.length; - for (i = 0; i < l; i++) { - token.items[i].tokens = this.blockTokens(token.items[i].text, [], false); - } tokens.push(token); continue; } @@ -1569,7 +1586,7 @@ var Lexer_1 = class Lexer { } // def - if (top && (token = this.tokenizer.def(src))) { + if (this.state.top && (token = this.tokenizer.def(src))) { src = src.substring(token.raw.length); if (!this.tokens.links[token.tag]) { this.tokens.links[token.tag] = { @@ -1602,18 +1619,20 @@ var Lexer_1 = class Lexer { const tempSrc = src.slice(1); let tempStart; this.options.extensions.startBlock.forEach(function(getStartIndex) { - tempStart = getStartIndex.call(this, tempSrc); + tempStart = getStartIndex.call({ lexer: this }, tempSrc); if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); } }); if (startIndex < Infinity && startIndex >= 0) { cutSrc = src.substring(0, startIndex + 1); } } - if (top && (token = this.tokenizer.paragraph(cutSrc))) { + if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) { lastToken = tokens[tokens.length - 1]; if (lastParagraphClipped && lastToken.type === 'paragraph') { lastToken.raw += '\n' + token.raw; lastToken.text += '\n' + token.text; + this.inlineQueue.pop(); + this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text; } else { tokens.push(token); } @@ -1629,6 +1648,8 @@ var Lexer_1 = class Lexer { if (lastToken && lastToken.type === 'text') { lastToken.raw += '\n' + token.raw; lastToken.text += '\n' + token.text; + this.inlineQueue.pop(); + this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text; } else { tokens.push(token); } @@ -1646,75 +1667,18 @@ var Lexer_1 = class Lexer { } } + this.state.top = true; return tokens; } - inline(tokens) { - let i, - j, - k, - l2, - row, - token; - - const l = tokens.length; - for (i = 0; i < l; i++) { - token = tokens[i]; - switch (token.type) { - case 'paragraph': - case 'text': - case 'heading': { - token.tokens = []; - this.inlineTokens(token.text, token.tokens); - break; - } - case 'table': { - token.tokens = { - header: [], - cells: [] - }; - - // header - l2 = token.header.length; - for (j = 0; j < l2; j++) { - token.tokens.header[j] = []; - this.inlineTokens(token.header[j], token.tokens.header[j]); - } - - // cells - l2 = token.cells.length; - for (j = 0; j < l2; j++) { - row = token.cells[j]; - token.tokens.cells[j] = []; - for (k = 0; k < row.length; k++) { - token.tokens.cells[j][k] = []; - this.inlineTokens(row[k], token.tokens.cells[j][k]); - } - } - - break; - } - case 'blockquote': { - this.inline(token.tokens); - break; - } - case 'list': { - l2 = token.items.length; - for (j = 0; j < l2; j++) { - this.inline(token.items[j].tokens); - } - break; - } - } - } - - return tokens; + inline(src, tokens) { + this.inlineQueue.push({ src, tokens }); } /** * Lexing/Compiling */ - inlineTokens(src, tokens = [], inLink = false, inRawBlock = false) { + inlineTokens(src, tokens = []) { let token, lastToken, cutSrc; // String with 
links masked to avoid interference with em and strong @@ -1728,14 +1692,14 @@ var Lexer_1 = class Lexer { if (links.length > 0) { while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) { if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) { - maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex); + maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString$1('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex); } } } } // Mask out other blocks while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) { - maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex); + maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString$1('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex); } // Mask out escaped em & strong delimiters @@ -1753,7 +1717,7 @@ var Lexer_1 = class Lexer { if (this.options.extensions && this.options.extensions.inline && this.options.extensions.inline.some((extTokenizer) => { - if (token = extTokenizer.call(this, src, tokens)) { + if (token = extTokenizer.call({ lexer: this }, src, tokens)) { src = src.substring(token.raw.length); tokens.push(token); return true; @@ -1771,10 +1735,8 @@ var Lexer_1 = class Lexer { } // tag - if (token = this.tokenizer.tag(src, inLink, inRawBlock)) { + if (token = this.tokenizer.tag(src)) { src = src.substring(token.raw.length); - inLink = token.inLink; - inRawBlock = token.inRawBlock; lastToken = tokens[tokens.length - 1]; if (lastToken && token.type === 'text' && lastToken.type === 'text') { lastToken.raw += token.raw; @@ -1788,9 +1750,6 @@ var Lexer_1 = class Lexer { // link if (token = this.tokenizer.link(src)) { src = src.substring(token.raw.length); - if (token.type === 'link') { - token.tokens = this.inlineTokens(token.text, [], true, inRawBlock); - } tokens.push(token); continue; } @@ -1799,10 +1758,7 @@ var Lexer_1 = class Lexer { if (token = this.tokenizer.reflink(src, this.tokens.links)) { src = src.substring(token.raw.length); lastToken = tokens[tokens.length - 1]; - if (token.type === 'link') { - token.tokens = this.inlineTokens(token.text, [], true, inRawBlock); - tokens.push(token); - } else if (lastToken && token.type === 'text' && lastToken.type === 'text') { + if (lastToken && token.type === 'text' && lastToken.type === 'text') { lastToken.raw += token.raw; lastToken.text += token.text; } else { @@ -1814,7 +1770,6 @@ var Lexer_1 = class Lexer { // em & strong if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) { src = src.substring(token.raw.length); - token.tokens = this.inlineTokens(token.text, [], inLink, inRawBlock); tokens.push(token); continue; } @@ -1836,7 +1791,6 @@ var Lexer_1 = class Lexer { // del (gfm) if (token = this.tokenizer.del(src)) { src = src.substring(token.raw.length); - token.tokens = this.inlineTokens(token.text, [], inLink, inRawBlock); tokens.push(token); continue; } @@ -1849,7 +1803,7 @@ var Lexer_1 = class Lexer { } // url (gfm) - if (!inLink && (token = this.tokenizer.url(src, mangle))) { + if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) { src = src.substring(token.raw.length); tokens.push(token); continue; @@ -1863,14 +1817,14 @@ var Lexer_1 = class Lexer { const tempSrc = 
src.slice(1); let tempStart; this.options.extensions.startInline.forEach(function(getStartIndex) { - tempStart = getStartIndex.call(this, tempSrc); + tempStart = getStartIndex.call({ lexer: this }, tempSrc); if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); } }); if (startIndex < Infinity && startIndex >= 0) { cutSrc = src.substring(0, startIndex + 1); } } - if (token = this.tokenizer.inlineText(cutSrc, inRawBlock, smartypants)) { + if (token = this.tokenizer.inlineText(cutSrc, smartypants)) { src = src.substring(token.raw.length); if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started prevChar = token.raw.slice(-1); @@ -1901,10 +1855,10 @@ var Lexer_1 = class Lexer { } }; -const { defaults: defaults$2 } = defaults$5.exports; +const { defaults: defaults$3 } = defaults; const { - cleanUrl, - escape: escape$1 + cleanUrl: cleanUrl$1, + escape: escape$2 } = helpers; /** @@ -1912,7 +1866,7 @@ const { */ var Renderer_1 = class Renderer { constructor(options) { - this.options = options || defaults$2; + this.options = options || defaults$3; } code(code, infostring, escaped) { @@ -1929,15 +1883,15 @@ var Renderer_1 = class Renderer { if (!lang) { return '
<pre><code>'
-        + (escaped ? code : escape$1(code, true))
+        + (escaped ? code : escape$2(code, true))
         + '</code></pre>\n';
     }
 
     return '<pre><code class="'
       + this.options.langPrefix
-      + escape$1(lang, true)
+      + escape$2(lang, true)
       + '">'
-      + (escaped ? code : escape$1(code, true))
+      + (escaped ? code : escape$2(code, true))
       + '</code></pre>\n';
   }
 
@@ -2037,11 +1991,11 @@ var Renderer_1 = class Renderer {
   }
 
   link(href, title, text) {
-    href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
+    href = cleanUrl$1(this.options.sanitize, this.options.baseUrl, href);
     if (href === null) {
       return text;
     }
-    let out = '<a href="' + escape$1(href) + '"';
       return '<p>An error occurred:</p><pre>'
-        + escape(e.message + '', true)
+        + escape$3(e.message + '', true)
         + '</pre>
'; } throw e; @@ -2580,21 +2523,21 @@ function marked(src, opt, callback) { marked.options = marked.setOptions = function(opt) { - merge(marked.defaults, opt); + merge$2(marked.defaults, opt); changeDefaults(marked.defaults); return marked; }; marked.getDefaults = getDefaults; -marked.defaults = defaults; +marked.defaults = defaults$5; /** * Use Extension */ marked.use = function(...args) { - const opts = merge({}, ...args); + const opts = merge$2({}, ...args); const extensions = marked.defaults.extensions || { renderers: {}, childTokens: {} }; let hasExtensions; @@ -2654,7 +2597,7 @@ marked.use = function(...args) { // ==-- Parse "overwrite" extensions --== // if (pack.renderer) { - const renderer = marked.defaults.renderer || new Renderer(); + const renderer = marked.defaults.renderer || new Renderer_1(); for (const prop in pack.renderer) { const prevRenderer = renderer[prop]; // Replace renderer with func to run extension, but fall back if false @@ -2669,7 +2612,7 @@ marked.use = function(...args) { opts.renderer = renderer; } if (pack.tokenizer) { - const tokenizer = marked.defaults.tokenizer || new Tokenizer(); + const tokenizer = marked.defaults.tokenizer || new Tokenizer_1(); for (const prop in pack.tokenizer) { const prevTokenizer = tokenizer[prop]; // Replace tokenizer with func to run extension, but fall back if false @@ -2712,10 +2655,10 @@ marked.walkTokens = function(tokens, callback) { callback(token); switch (token.type) { case 'table': { - for (const cell of token.tokens.header) { + for (const cell of token.header.tokens) { marked.walkTokens(cell, callback); } - for (const row of token.tokens.cells) { + for (const row of token.cells.tokens) { for (const cell of row) { marked.walkTokens(cell, callback); } @@ -2752,20 +2695,20 @@ marked.parseInline = function(src, opt) { + Object.prototype.toString.call(src) + ', string expected'); } - opt = merge({}, marked.defaults, opt || {}); - checkSanitizeDeprecation(opt); + opt = merge$2({}, marked.defaults, opt || {}); + checkSanitizeDeprecation$1(opt); try { - const tokens = Lexer.lexInline(src, opt); + const tokens = Lexer_1.lexInline(src, opt); if (opt.walkTokens) { marked.walkTokens(tokens, opt.walkTokens); } - return Parser.parseInline(tokens, opt); + return Parser_1.parseInline(tokens, opt); } catch (e) { e.message += '\nPlease report this to https://github.com/markedjs/marked.'; if (opt.silent) { return '

<p>An error occurred:</p><pre>'
-        + escape(e.message + '', true)
+        + escape$3(e.message + '', true)
         + '</pre>
'; } throw e; @@ -2776,18 +2719,18 @@ marked.parseInline = function(src, opt) { * Expose */ -marked.Parser = Parser; -marked.parser = Parser.parse; +marked.Parser = Parser_1; +marked.parser = Parser_1.parse; -marked.Renderer = Renderer; -marked.TextRenderer = TextRenderer; +marked.Renderer = Renderer_1; +marked.TextRenderer = TextRenderer_1; -marked.Lexer = Lexer; -marked.lexer = Lexer.lex; +marked.Lexer = Lexer_1; +marked.lexer = Lexer_1.lex; -marked.Tokenizer = Tokenizer; +marked.Tokenizer = Tokenizer_1; -marked.Slugger = Slugger; +marked.Slugger = Slugger_1; marked.parse = marked; diff --git a/lib/marked.js b/lib/marked.js index 10fe5d2815..8bf291ca32 100644 --- a/lib/marked.js +++ b/lib/marked.js @@ -49,61 +49,70 @@ } function _createForOfIteratorHelperLoose(o, allowArrayLike) { - var it = typeof Symbol !== "undefined" && o[Symbol.iterator] || o["@@iterator"]; - if (it) return (it = it.call(o)).next.bind(it); - - if (Array.isArray(o) || (it = _unsupportedIterableToArray(o)) || allowArrayLike && o && typeof o.length === "number") { - if (it) o = it; - var i = 0; - return function () { - if (i >= o.length) return { - done: true - }; - return { - done: false, - value: o[i++] + var it; + + if (typeof Symbol === "undefined" || o[Symbol.iterator] == null) { + if (Array.isArray(o) || (it = _unsupportedIterableToArray(o)) || allowArrayLike && o && typeof o.length === "number") { + if (it) o = it; + var i = 0; + return function () { + if (i >= o.length) return { + done: true + }; + return { + done: false, + value: o[i++] + }; }; - }; + } + + throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); } - throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); + it = o[Symbol.iterator](); + return it.next.bind(it); } - var defaults$5 = {exports: {}}; - - function getDefaults$1() { - return { - baseUrl: null, - breaks: false, - extensions: null, - gfm: true, - headerIds: true, - headerPrefix: '', - highlight: null, - langPrefix: 'language-', - mangle: true, - pedantic: false, - renderer: null, - sanitize: false, - sanitizer: null, - silent: false, - smartLists: false, - smartypants: false, - tokenizer: null, - walkTokens: null, - xhtml: false - }; + function createCommonjsModule(fn) { + var module = { exports: {} }; + return fn(module, module.exports), module.exports; } - function changeDefaults$1(newDefaults) { - defaults$5.exports.defaults = newDefaults; - } + var defaults = createCommonjsModule(function (module) { + function getDefaults() { + return { + baseUrl: null, + breaks: false, + extensions: null, + gfm: true, + headerIds: true, + headerPrefix: '', + highlight: null, + langPrefix: 'language-', + mangle: true, + pedantic: false, + renderer: null, + sanitize: false, + sanitizer: null, + silent: false, + smartLists: false, + smartypants: false, + tokenizer: null, + walkTokens: null, + xhtml: false + }; + } - defaults$5.exports = { - defaults: getDefaults$1(), - getDefaults: getDefaults$1, - changeDefaults: changeDefaults$1 - }; + function changeDefaults(newDefaults) { + module.exports.defaults = newDefaults; + } + + module.exports = { + defaults: getDefaults(), + getDefaults: getDefaults, + changeDefaults: changeDefaults + }; + }); /** * Helpers @@ -124,7 +133,7 @@ return escapeReplacements[ch]; }; - function escape$2(html, encode) { + function escape(html, encode) { if (encode) { if 
(escapeTest.test(html)) { return html.replace(escapeReplace, getEscapeReplacement); @@ -140,7 +149,7 @@ var unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig; - function unescape$1(html) { + function unescape(html) { // explicitly match decimal, hex, and named HTML entities return html.replace(unescapeTest, function (_, n) { n = n.toLowerCase(); @@ -156,7 +165,7 @@ var caret = /(^|[^\[])\^/g; - function edit$1(regex, opt) { + function edit(regex, opt) { regex = regex.source || regex; opt = opt || ''; var obj = { @@ -176,12 +185,12 @@ var nonWordAndColonTest = /[^\w:]/g; var originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i; - function cleanUrl$1(sanitize, base, href) { + function cleanUrl(sanitize, base, href) { if (sanitize) { var prot; try { - prot = decodeURIComponent(unescape$1(href)).replace(nonWordAndColonTest, '').toLowerCase(); + prot = decodeURIComponent(unescape(href)).replace(nonWordAndColonTest, '').toLowerCase(); } catch (e) { return null; } @@ -217,7 +226,7 @@ if (justDomain.test(base)) { baseUrls[' ' + base] = base + '/'; } else { - baseUrls[' ' + base] = rtrim$1(base, '/', true); + baseUrls[' ' + base] = rtrim(base, '/', true); } } @@ -241,11 +250,11 @@ } } - var noopTest$1 = { + var noopTest = { exec: function noopTest() {} }; - function merge$2(obj) { + function merge(obj) { var i = 1, target, key; @@ -263,7 +272,7 @@ return obj; } - function splitCells$1(tableRow, count) { + function splitCells(tableRow, count) { // ensure that every cell-delimiting pipe has a space // before it to distinguish it from an escaped pipe var row = tableRow.replace(/\|/g, function (match, offset, str) { @@ -284,7 +293,15 @@ } }), cells = row.split(/ \|/); - var i = 0; + var i = 0; // First/last cell in a row cannot be empty if it has no leading/trailing pipe + + if (!cells[0].trim()) { + cells.shift(); + } + + if (!cells[cells.length - 1].trim()) { + cells.pop(); + } if (cells.length > count) { cells.splice(count); @@ -305,7 +322,7 @@ // invert: Remove suffix of non-c chars instead. Default falsey. - function rtrim$1(str, c, invert) { + function rtrim(str, c, invert) { var l = str.length; if (l === 0) { @@ -330,7 +347,7 @@ return str.substr(0, l - suffLen); } - function findClosingBracket$1(str, b) { + function findClosingBracket(str, b) { if (str.indexOf(b[1]) === -1) { return -1; } @@ -356,14 +373,14 @@ return -1; } - function checkSanitizeDeprecation$1(opt) { + function checkSanitizeDeprecation(opt) { if (opt && opt.sanitize && !opt.silent) { console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. 
Read more here: https://marked.js.org/#/USING_ADVANCED.md#options'); } } // copied from https://stackoverflow.com/a/5450113/806777 - function repeatString$1(pattern, count) { + function repeatString(pattern, count) { if (count < 1) { return ''; } @@ -383,38 +400,40 @@ } var helpers = { - escape: escape$2, - unescape: unescape$1, - edit: edit$1, - cleanUrl: cleanUrl$1, + escape: escape, + unescape: unescape, + edit: edit, + cleanUrl: cleanUrl, resolveUrl: resolveUrl, - noopTest: noopTest$1, - merge: merge$2, - splitCells: splitCells$1, - rtrim: rtrim$1, - findClosingBracket: findClosingBracket$1, - checkSanitizeDeprecation: checkSanitizeDeprecation$1, - repeatString: repeatString$1 + noopTest: noopTest, + merge: merge, + splitCells: splitCells, + rtrim: rtrim, + findClosingBracket: findClosingBracket, + checkSanitizeDeprecation: checkSanitizeDeprecation, + repeatString: repeatString }; - var defaults$4 = defaults$5.exports.defaults; - var rtrim = helpers.rtrim, - splitCells = helpers.splitCells, + var defaults$1 = defaults.defaults; + var rtrim$1 = helpers.rtrim, + splitCells$1 = helpers.splitCells, _escape = helpers.escape, - findClosingBracket = helpers.findClosingBracket; + findClosingBracket$1 = helpers.findClosingBracket; - function outputLink(cap, link, raw) { + function outputLink(cap, link, raw, lexer) { var href = link.href; var title = link.title ? _escape(link.title) : null; var text = cap[1].replace(/\\([\[\]])/g, '$1'); if (cap[0].charAt(0) !== '!') { + lexer.state.inLink = true; return { type: 'link', raw: raw, href: href, title: title, - text: text + text: text, + tokens: lexer.inlineTokens(text, []) }; } else { return { @@ -458,7 +477,7 @@ var Tokenizer_1 = /*#__PURE__*/function () { function Tokenizer(options) { - this.options = options || defaults$4; + this.options = options || defaults$1; } var _proto = Tokenizer.prototype; @@ -489,7 +508,7 @@ type: 'code', raw: cap[0], codeBlockStyle: 'indented', - text: !this.options.pedantic ? rtrim(text, '\n') : text + text: !this.options.pedantic ? rtrim$1(text, '\n') : text }; } }; @@ -516,7 +535,7 @@ var text = cap[2].trim(); // remove trailing #s if (/#$/.test(text)) { - var trimmed = rtrim(text, '#'); + var trimmed = rtrim$1(text, '#'); if (this.options.pedantic) { text = trimmed.trim(); @@ -526,51 +545,15 @@ } } - return { + var token = { type: 'heading', raw: cap[0], depth: cap[1].length, - text: text - }; - } - }; - - _proto.nptable = function nptable(src) { - var cap = this.rules.block.nptable.exec(src); - - if (cap) { - var item = { - type: 'table', - header: splitCells(cap[1].replace(/^ *| *\| *$/g, '')), - align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */), - cells: cap[3] ? 
cap[3].replace(/\n$/, '').split('\n') : [], - raw: cap[0] + text: text, + tokens: [] }; - - if (item.header.length === item.align.length) { - var l = item.align.length; - var i; - - for (i = 0; i < l; i++) { - if (/^ *-+: *$/.test(item.align[i])) { - item.align[i] = 'right'; - } else if (/^ *:-+: *$/.test(item.align[i])) { - item.align[i] = 'center'; - } else if (/^ *:-+ *$/.test(item.align[i])) { - item.align[i] = 'left'; - } else { - item.align[i] = null; - } - } - - l = item.cells.length; - - for (i = 0; i < l; i++) { - item.cells[i] = splitCells(item.cells[i], item.header.length); - } - - return item; - } + this.lexer.inline(token.text, token.tokens); + return token; } }; @@ -593,6 +576,7 @@ return { type: 'blockquote', raw: cap[0], + tokens: this.lexer.blockTokens(text, []), text: text }; } @@ -678,7 +662,7 @@ } // trim item newlines at end - item = rtrim(item, '\n'); + item = rtrim$1(item, '\n'); if (i !== l - 1) { raw = raw + '\n'; @@ -709,15 +693,24 @@ } } - list.items.push({ + this.lexer.state.top = false; + var token = { type: 'list_item', raw: raw, task: istask, checked: ischecked, loose: loose, - text: item - }); - } + text: item, + tokens: this.lexer.blockTokens(item, []) + }; // this.lexer.inline(token.text, ) + + list.items.push(token); + } // l2 = token.items.length; + // for (j = 0; j < l2; j++) { + // this.inline(token.items[j].tokens); + // } + // break; + return list; } @@ -727,12 +720,21 @@ var cap = this.rules.block.html.exec(src); if (cap) { - return { - type: this.options.sanitize ? 'paragraph' : 'html', + var token = { + type: 'html', raw: cap[0], pre: !this.options.sanitizer && (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'), - text: this.options.sanitize ? this.options.sanitizer ? this.options.sanitizer(cap[0]) : _escape(cap[0]) : cap[0] + text: cap[0] }; + + if (this.options.sanitize) { + token.type = 'paragraph'; + token.text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : _escape(cap[0]); + token.tokens = []; + this.lexer.inline(token.text, token.tokens); + } + + return token; } }; @@ -758,15 +760,19 @@ if (cap) { var item = { type: 'table', - header: splitCells(cap[1].replace(/^ *| *\| *$/g, '')), + header: { + text: splitCells$1(cap[1]) + }, align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */), - cells: cap[3] ? cap[3].replace(/\n$/, '').split('\n') : [] + cells: { + text: cap[3] ? 
cap[3].replace(/\n$/, '').split('\n') : [] + } }; - if (item.header.length === item.align.length) { + if (item.header.text.length === item.align.length) { item.raw = cap[0]; var l = item.align.length; - var i; + var i, j, k, row; for (i = 0; i < l; i++) { if (/^ *-+: *$/.test(item.align[i])) { @@ -780,10 +786,34 @@ } } - l = item.cells.length; + l = item.cells.text.length; for (i = 0; i < l; i++) { - item.cells[i] = splitCells(item.cells[i].replace(/^ *\| *| *\| *$/g, ''), item.header.length); + item.cells.text[i] = splitCells$1(item.cells.text[i], item.header.text.length); + } // parse child tokens inside headers and cells + + + item.header.tokens = []; + item.cells.tokens = []; // header child tokens + + l = item.header.text.length; + + for (j = 0; j < l; j++) { + item.header.tokens[j] = []; + this.lexer.inlineTokens(item.header.text[j], item.header.tokens[j]); + } // cell child tokens + + + l = item.cells.text.length; + + for (j = 0; j < l; j++) { + row = item.cells.text[j]; + item.cells.tokens[j] = []; + + for (k = 0; k < row.length; k++) { + item.cells.tokens[j][k] = []; + this.lexer.inlineTokens(row[k], item.cells.tokens[j][k]); + } } return item; @@ -795,12 +825,15 @@ var cap = this.rules.block.lheading.exec(src); if (cap) { - return { + var token = { type: 'heading', raw: cap[0], depth: cap[2].charAt(0) === '=' ? 1 : 2, - text: cap[1] + text: cap[1], + tokens: [] }; + this.lexer.inline(token.text, token.tokens); + return token; } }; @@ -808,11 +841,14 @@ var cap = this.rules.block.paragraph.exec(src); if (cap) { - return { + var token = { type: 'paragraph', raw: cap[0], - text: cap[1].charAt(cap[1].length - 1) === '\n' ? cap[1].slice(0, -1) : cap[1] + text: cap[1].charAt(cap[1].length - 1) === '\n' ? cap[1].slice(0, -1) : cap[1], + tokens: [] }; + this.lexer.inline(token.text, token.tokens); + return token; } }; @@ -820,11 +856,14 @@ var cap = this.rules.block.text.exec(src); if (cap) { - return { + var token = { type: 'text', raw: cap[0], - text: cap[0] + text: cap[0], + tokens: [] }; + this.lexer.inline(token.text, token.tokens); + return token; } }; @@ -840,27 +879,27 @@ } }; - _proto.tag = function tag(src, inLink, inRawBlock) { + _proto.tag = function tag(src) { var cap = this.rules.inline.tag.exec(src); if (cap) { - if (!inLink && /^
/i.test(cap[0])) { - inLink = false; + if (!this.lexer.state.inLink && /^/i.test(cap[0])) { + this.lexer.state.inLink = false; } - if (!inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { - inRawBlock = true; - } else if (inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { - inRawBlock = false; + if (!this.lexer.state.inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { + this.lexer.state.inRawBlock = true; + } else if (this.lexer.state.inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) { + this.lexer.state.inRawBlock = false; } return { type: this.options.sanitize ? 'text' : 'html', raw: cap[0], - inLink: inLink, - inRawBlock: inRawBlock, + inLink: this.lexer.state.inLink, + inRawBlock: this.lexer.state.inRawBlock, text: this.options.sanitize ? this.options.sanitizer ? this.options.sanitizer(cap[0]) : _escape(cap[0]) : cap[0] }; } @@ -879,14 +918,14 @@ } // ending angle bracket cannot be escaped - var rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\'); + var rtrimSlash = rtrim$1(trimmedUrl.slice(0, -1), '\\'); if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) { return; } } else { // find closing parenthesis - var lastParenIndex = findClosingBracket(cap[2], '()'); + var lastParenIndex = findClosingBracket$1(cap[2], '()'); if (lastParenIndex > -1) { var start = cap[0].indexOf('!') === 0 ? 5 : 4; @@ -926,7 +965,7 @@ return outputLink(cap, { href: href ? href.replace(this.rules.inline._escapes, '$1') : href, title: title ? title.replace(this.rules.inline._escapes, '$1') : title - }, cap[0]); + }, cap[0], this.lexer); } }; @@ -946,7 +985,7 @@ }; } - return outputLink(cap, link, cap[0]); + return outputLink(cap, link, cap[0], this.lexer); } }; @@ -997,18 +1036,23 @@ rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal); // Create `em` if smallest delimiter has odd char count. *a*** if (Math.min(lLength, rLength) % 2) { + var _text = src.slice(1, lLength + match.index + rLength); + return { type: 'em', raw: src.slice(0, lLength + match.index + rLength + 1), - text: src.slice(1, lLength + match.index + rLength) + text: _text, + tokens: this.lexer.inlineTokens(_text, []) }; } // Create 'strong' if smallest delimiter has even char count. **a*** + var text = src.slice(2, lLength + match.index + rLength - 1); return { type: 'strong', raw: src.slice(0, lLength + match.index + rLength + 1), - text: src.slice(2, lLength + match.index + rLength - 1) + text: text, + tokens: this.lexer.inlineTokens(text, []) }; } } @@ -1053,7 +1097,8 @@ return { type: 'del', raw: cap[0], - text: cap[2] + text: cap[2], + tokens: this.lexer.inlineTokens(cap[2], []) }; } }; @@ -1127,13 +1172,13 @@ } }; - _proto.inlineText = function inlineText(src, inRawBlock, smartypants) { + _proto.inlineText = function inlineText(src, smartypants) { var cap = this.rules.inline.text.exec(src); if (cap) { var text; - if (inRawBlock) { + if (this.lexer.state.inRawBlock) { text = this.options.sanitize ? this.options.sanitizer ? this.options.sanitizer(cap[0]) : _escape(cap[0]) : cap[0]; } else { text = _escape(this.options.smartypants ? 
smartypants(cap[0]) : cap[0]); @@ -1150,14 +1195,14 @@ return Tokenizer; }(); - var noopTest = helpers.noopTest, - edit = helpers.edit, + var noopTest$1 = helpers.noopTest, + edit$1 = helpers.edit, merge$1 = helpers.merge; /** * Block-Level Grammar */ - var block$1 = { + var block = { newline: /^(?: *(?:\n|$))+/, code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/, fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?:\n+|$)|$)/, @@ -1176,76 +1221,68 @@ + '|(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag + ')', def: /^ {0,3}\[(label)\]: *\n? *]+)>?(?:(?: +\n? *| *\n *)(title))? *(?:\n+|$)/, - nptable: noopTest, - table: noopTest, + table: noopTest$1, lheading: /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/, // regex template, placeholders will be replaced according to different paragraph // interruption rules of commonmark and the original markdown spec: _paragraph: /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html| +\n)[^\n]+)*)/, text: /^[^\n]+/ }; - block$1._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/; - block$1._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/; - block$1.def = edit(block$1.def).replace('label', block$1._label).replace('title', block$1._title).getRegex(); - block$1.bullet = /(?:[*+-]|\d{1,9}[.)])/; - block$1.item = /^( *)(bull) ?[^\n]*(?:\n(?! *bull ?)[^\n]*)*/; - block$1.item = edit(block$1.item, 'gm').replace(/bull/g, block$1.bullet).getRegex(); - block$1.listItemStart = edit(/^( *)(bull) */).replace('bull', block$1.bullet).getRegex(); - block$1.list = edit(block$1.list).replace(/bull/g, block$1.bullet).replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))').replace('def', '\\n+(?=' + block$1.def.source + ')').getRegex(); - block$1._tag = 'address|article|aside|base|basefont|blockquote|body|caption' + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption' + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe' + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option' + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr' + '|track|ul'; - block$1._comment = /|$)/; - block$1.html = edit(block$1.html, 'i').replace('comment', block$1._comment).replace('tag', block$1._tag).replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(); - block$1.paragraph = edit(block$1._paragraph).replace('hr', block$1.hr).replace('heading', ' {0,3}#{1,6} ').replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs + block._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/; + block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/; + block.def = edit$1(block.def).replace('label', block._label).replace('title', block._title).getRegex(); + block.bullet = /(?:[*+-]|\d{1,9}[.)])/; + block.item = /^( *)(bull) ?[^\n]*(?:\n(?! 
*bull ?)[^\n]*)*/; + block.item = edit$1(block.item, 'gm').replace(/bull/g, block.bullet).getRegex(); + block.listItemStart = edit$1(/^( *)(bull) */).replace('bull', block.bullet).getRegex(); + block.list = edit$1(block.list).replace(/bull/g, block.bullet).replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))').replace('def', '\\n+(?=' + block.def.source + ')').getRegex(); + block._tag = 'address|article|aside|base|basefont|blockquote|body|caption' + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption' + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe' + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option' + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr' + '|track|ul'; + block._comment = /|$)/; + block.html = edit$1(block.html, 'i').replace('comment', block._comment).replace('tag', block._tag).replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(); + block.paragraph = edit$1(block._paragraph).replace('hr', block.hr).replace('heading', ' {0,3}#{1,6} ').replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs .replace('blockquote', ' {0,3}>').replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n').replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt - .replace('html', ')|<(?:script|pre|style|textarea|!--)').replace('tag', block$1._tag) // pars can be interrupted by type (6) html blocks + .replace('html', ')|<(?:script|pre|style|textarea|!--)').replace('tag', block._tag) // pars can be interrupted by type (6) html blocks .getRegex(); - block$1.blockquote = edit(block$1.blockquote).replace('paragraph', block$1.paragraph).getRegex(); + block.blockquote = edit$1(block.blockquote).replace('paragraph', block.paragraph).getRegex(); /** * Normal Block Grammar */ - block$1.normal = merge$1({}, block$1); + block.normal = merge$1({}, block); /** * GFM Block Grammar */ - block$1.gfm = merge$1({}, block$1.normal, { - nptable: '^ *([^|\\n ].*\\|.*)\\n' // Header - + ' {0,3}([-:]+ *\\|[-| :]*)' // Align - + '(?:\\n((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)', - // Cells - table: '^ *\\|(.+)\\n' // Header - + ' {0,3}\\|?( *[-:]+[-| :]*)' // Align + block.gfm = merge$1({}, block.normal, { + table: '^ *([^\\n ].*\\|.*)\\n' // Header + + ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)\\|?' 
// Align + '(?:\\n *((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells }); - block$1.gfm.nptable = edit(block$1.gfm.nptable).replace('hr', block$1.hr).replace('heading', ' {0,3}#{1,6} ').replace('blockquote', ' {0,3}>').replace('code', ' {4}[^\\n]').replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n').replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt - .replace('html', ')|<(?:script|pre|style|textarea|!--)').replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks - .getRegex(); - block$1.gfm.table = edit(block$1.gfm.table).replace('hr', block$1.hr).replace('heading', ' {0,3}#{1,6} ').replace('blockquote', ' {0,3}>').replace('code', ' {4}[^\\n]').replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n').replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt - .replace('html', ')|<(?:script|pre|style|textarea|!--)').replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks + block.gfm.table = edit$1(block.gfm.table).replace('hr', block.hr).replace('heading', ' {0,3}#{1,6} ').replace('blockquote', ' {0,3}>').replace('code', ' {4}[^\\n]').replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n').replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt + .replace('html', ')|<(?:script|pre|style|textarea|!--)').replace('tag', block._tag) // tables can be interrupted by type (6) html blocks .getRegex(); /** * Pedantic grammar (original John Gruber's loose markdown specification) */ - block$1.pedantic = merge$1({}, block$1.normal, { - html: edit('^ *(?:comment *(?:\\n|\\s*$)' + '|<(tag)[\\s\\S]+? *(?:\\n{2,}|\\s*$)' // closed tag - + '|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))').replace('comment', block$1._comment).replace(/tag/g, '(?!(?:' + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub' + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)' + '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b').getRegex(), + block.pedantic = merge$1({}, block.normal, { + html: edit$1('^ *(?:comment *(?:\\n|\\s*$)' + '|<(tag)[\\s\\S]+? *(?:\\n{2,}|\\s*$)' // closed tag + + '|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))').replace('comment', block._comment).replace(/tag/g, '(?!(?:' + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub' + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)' + '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b').getRegex(), def: /^ *\[([^\]]+)\]: *]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/, heading: /^(#{1,6})(.*)(?:\n+|$)/, - fences: noopTest, + fences: noopTest$1, // fences not supported - paragraph: edit(block$1.normal._paragraph).replace('hr', block$1.hr).replace('heading', ' *#{1,6} *[^\n]').replace('lheading', block$1.lheading).replace('blockquote', ' {0,3}>').replace('|fences', '').replace('|list', '').replace('|html', '').getRegex() + paragraph: edit$1(block.normal._paragraph).replace('hr', block.hr).replace('heading', ' *#{1,6} *[^\n]').replace('lheading', block.lheading).replace('blockquote', ' {0,3}>').replace('|fences', '').replace('|list', '').replace('|html', '').getRegex() }); /** * Inline-Level Grammar */ - var inline$1 = { + var inline = { escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/, autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/, - url: noopTest, + url: noopTest$1, tag: '^comment' + '|^' // self-closing tag + '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag + '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. 
@@ -1266,43 +1303,43 @@ }, code: /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/, br: /^( {2,}|\\)\n(?!\s*$)/, - del: noopTest, + del: noopTest$1, text: /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\?@\\[\\]`^{|}~'; - inline$1.punctuation = edit(inline$1.punctuation).replace(/punctuation/g, inline$1._punctuation).getRegex(); // sequences em should skip over [title](link), `code`, - - inline$1.blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g; - inline$1.escapedEmSt = /\\\*|\\_/g; - inline$1._comment = edit(block$1._comment).replace('(?:-->|$)', '-->').getRegex(); - inline$1.emStrong.lDelim = edit(inline$1.emStrong.lDelim).replace(/punct/g, inline$1._punctuation).getRegex(); - inline$1.emStrong.rDelimAst = edit(inline$1.emStrong.rDelimAst, 'g').replace(/punct/g, inline$1._punctuation).getRegex(); - inline$1.emStrong.rDelimUnd = edit(inline$1.emStrong.rDelimUnd, 'g').replace(/punct/g, inline$1._punctuation).getRegex(); - inline$1._escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g; - inline$1._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/; - inline$1._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/; - inline$1.autolink = edit(inline$1.autolink).replace('scheme', inline$1._scheme).replace('email', inline$1._email).getRegex(); - inline$1._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/; - inline$1.tag = edit(inline$1.tag).replace('comment', inline$1._comment).replace('attribute', inline$1._attribute).getRegex(); - inline$1._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/; - inline$1._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/; - inline$1._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/; - inline$1.link = edit(inline$1.link).replace('label', inline$1._label).replace('href', inline$1._href).replace('title', inline$1._title).getRegex(); - inline$1.reflink = edit(inline$1.reflink).replace('label', inline$1._label).getRegex(); - inline$1.reflinkSearch = edit(inline$1.reflinkSearch, 'g').replace('reflink', inline$1.reflink).replace('nolink', inline$1.nolink).getRegex(); + inline._punctuation = '!"#$%&\'()+\\-.,/:;<=>?@\\[\\]`^{|}~'; + inline.punctuation = edit$1(inline.punctuation).replace(/punctuation/g, inline._punctuation).getRegex(); // sequences em should skip over [title](link), `code`, + + inline.blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g; + inline.escapedEmSt = /\\\*|\\_/g; + inline._comment = edit$1(block._comment).replace('(?:-->|$)', '-->').getRegex(); + inline.emStrong.lDelim = edit$1(inline.emStrong.lDelim).replace(/punct/g, inline._punctuation).getRegex(); + inline.emStrong.rDelimAst = edit$1(inline.emStrong.rDelimAst, 'g').replace(/punct/g, inline._punctuation).getRegex(); + inline.emStrong.rDelimUnd = edit$1(inline.emStrong.rDelimUnd, 'g').replace(/punct/g, inline._punctuation).getRegex(); + inline._escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g; + inline._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/; + inline._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/; + inline.autolink = edit$1(inline.autolink).replace('scheme', inline._scheme).replace('email', inline._email).getRegex(); + inline._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/; + inline.tag = edit$1(inline.tag).replace('comment', inline._comment).replace('attribute', 
inline._attribute).getRegex(); + inline._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/; + inline._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/; + inline._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/; + inline.link = edit$1(inline.link).replace('label', inline._label).replace('href', inline._href).replace('title', inline._title).getRegex(); + inline.reflink = edit$1(inline.reflink).replace('label', inline._label).getRegex(); + inline.reflinkSearch = edit$1(inline.reflinkSearch, 'g').replace('reflink', inline.reflink).replace('nolink', inline.nolink).getRegex(); /** * Normal Inline Grammar */ - inline$1.normal = merge$1({}, inline$1); + inline.normal = merge$1({}, inline); /** * Pedantic Inline Grammar */ - inline$1.pedantic = merge$1({}, inline$1.normal, { + inline.pedantic = merge$1({}, inline.normal, { strong: { start: /^__|\*\*/, middle: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/, @@ -1315,40 +1352,39 @@ endAst: /\*(?!\*)/g, endUnd: /_(?!_)/g }, - link: edit(/^!?\[(label)\]\((.*?)\)/).replace('label', inline$1._label).getRegex(), - reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/).replace('label', inline$1._label).getRegex() + link: edit$1(/^!?\[(label)\]\((.*?)\)/).replace('label', inline._label).getRegex(), + reflink: edit$1(/^!?\[(label)\]\s*\[([^\]]*)\]/).replace('label', inline._label).getRegex() }); /** * GFM Inline Grammar */ - inline$1.gfm = merge$1({}, inline$1.normal, { - escape: edit(inline$1.escape).replace('])', '~|])').getRegex(), + inline.gfm = merge$1({}, inline.normal, { + escape: edit$1(inline.escape).replace('])', '~|])').getRegex(), _extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/, url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/, _backpedal: /(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/, del: /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/, text: /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\= 0) { startIndex = Math.min(startIndex, tempStart); @@ -1622,12 +1658,14 @@ })(); } - if (top && (token = this.tokenizer.paragraph(cutSrc))) { + if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) { lastToken = tokens[tokens.length - 1]; if (lastParagraphClipped && lastToken.type === 'paragraph') { lastToken.raw += '\n' + token.raw; lastToken.text += '\n' + token.text; + this.inlineQueue.pop(); + this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text; } else { tokens.push(token); } @@ -1645,6 +1683,8 @@ if (lastToken && lastToken.type === 'text') { lastToken.raw += '\n' + token.raw; lastToken.text += '\n' + token.text; + this.inlineQueue.pop(); + this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text; } else { tokens.push(token); } @@ -1664,97 +1704,28 @@ } } + this.state.top = true; return tokens; }; - _proto.inline = function inline(tokens) { - var i, j, k, l2, row, token; - var l = tokens.length; - - for (i = 0; i < l; i++) { - token = tokens[i]; - - switch (token.type) { - case 'paragraph': - case 'text': - case 'heading': - { - token.tokens = []; - this.inlineTokens(token.text, token.tokens); - break; - } - - case 'table': - { - token.tokens = { - header: [], - cells: [] - }; // header - - l2 = token.header.length; - - for (j = 0; j < l2; j++) { - token.tokens.header[j] = []; - this.inlineTokens(token.header[j], token.tokens.header[j]); - } // cells - - - l2 = token.cells.length; - - for (j = 0; j < l2; j++) { - row = token.cells[j]; - 
token.tokens.cells[j] = []; - - for (k = 0; k < row.length; k++) { - token.tokens.cells[j][k] = []; - this.inlineTokens(row[k], token.tokens.cells[j][k]); - } - } - - break; - } - - case 'blockquote': - { - this.inline(token.tokens); - break; - } - - case 'list': - { - l2 = token.items.length; - - for (j = 0; j < l2; j++) { - this.inline(token.items[j].tokens); - } - - break; - } - } - } - - return tokens; + _proto.inline = function inline(src, tokens) { + this.inlineQueue.push({ + src: src, + tokens: tokens + }); } /** * Lexing/Compiling */ ; - _proto.inlineTokens = function inlineTokens(src, tokens, inLink, inRawBlock) { + _proto.inlineTokens = function inlineTokens(src, tokens) { var _this2 = this; if (tokens === void 0) { tokens = []; } - if (inLink === void 0) { - inLink = false; - } - - if (inRawBlock === void 0) { - inRawBlock = false; - } - var token, lastToken, cutSrc; // String with links masked to avoid interference with em and strong var maskedSrc = src; @@ -1767,7 +1738,7 @@ if (links.length > 0) { while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) { if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) { - maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex); + maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString$1('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex); } } } @@ -1775,7 +1746,7 @@ while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) { - maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex); + maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString$1('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex); } // Mask out escaped em & strong delimiters @@ -1791,7 +1762,9 @@ keepPrevChar = false; // extensions if (this.options.extensions && this.options.extensions.inline && this.options.extensions.inline.some(function (extTokenizer) { - if (token = extTokenizer.call(_this2, src, tokens)) { + if (token = extTokenizer.call({ + lexer: _this2 + }, src, tokens)) { src = src.substring(token.raw.length); tokens.push(token); return true; @@ -1810,10 +1783,8 @@ } // tag - if (token = this.tokenizer.tag(src, inLink, inRawBlock)) { + if (token = this.tokenizer.tag(src)) { src = src.substring(token.raw.length); - inLink = token.inLink; - inRawBlock = token.inRawBlock; lastToken = tokens[tokens.length - 1]; if (lastToken && token.type === 'text' && lastToken.type === 'text') { @@ -1829,11 +1800,6 @@ if (token = this.tokenizer.link(src)) { src = src.substring(token.raw.length); - - if (token.type === 'link') { - token.tokens = this.inlineTokens(token.text, [], true, inRawBlock); - } - tokens.push(token); continue; } // reflink, nolink @@ -1843,10 +1809,7 @@ src = src.substring(token.raw.length); lastToken = tokens[tokens.length - 1]; - if (token.type === 'link') { - token.tokens = this.inlineTokens(token.text, [], true, inRawBlock); - tokens.push(token); - } else if (lastToken && token.type === 'text' && lastToken.type === 'text') { + if (lastToken && token.type === 'text' && lastToken.type === 'text') { lastToken.raw += token.raw; lastToken.text += token.text; } else { @@ -1859,7 +1822,6 @@ if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) { src = 
src.substring(token.raw.length); - token.tokens = this.inlineTokens(token.text, [], inLink, inRawBlock); tokens.push(token); continue; } // code @@ -1881,7 +1843,6 @@ if (token = this.tokenizer.del(src)) { src = src.substring(token.raw.length); - token.tokens = this.inlineTokens(token.text, [], inLink, inRawBlock); tokens.push(token); continue; } // autolink @@ -1894,7 +1855,7 @@ } // url (gfm) - if (!inLink && (token = this.tokenizer.url(src, mangle))) { + if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) { src = src.substring(token.raw.length); tokens.push(token); continue; @@ -1911,7 +1872,9 @@ var tempStart = void 0; _this2.options.extensions.startInline.forEach(function (getStartIndex) { - tempStart = getStartIndex.call(this, tempSrc); + tempStart = getStartIndex.call({ + lexer: this + }, tempSrc); if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); @@ -1924,7 +1887,7 @@ })(); } - if (token = this.tokenizer.inlineText(cutSrc, inRawBlock, smartypants)) { + if (token = this.tokenizer.inlineText(cutSrc, smartypants)) { src = src.substring(token.raw.length); if (token.raw.slice(-1) !== '_') { @@ -1964,8 +1927,8 @@ key: "rules", get: function get() { return { - block: block, - inline: inline + block: block$1, + inline: inline$1 }; } }]); @@ -1973,8 +1936,8 @@ return Lexer; }(); - var defaults$2 = defaults$5.exports.defaults; - var cleanUrl = helpers.cleanUrl, + var defaults$3 = defaults.defaults; + var cleanUrl$1 = helpers.cleanUrl, escape$1 = helpers.escape; /** * Renderer @@ -1982,7 +1945,7 @@ var Renderer_1 = /*#__PURE__*/function () { function Renderer(options) { - this.options = options || defaults$2; + this.options = options || defaults$3; } var _proto = Renderer.prototype; @@ -2084,7 +2047,7 @@ }; _proto.link = function link(href, title, text) { - href = cleanUrl(this.options.sanitize, this.options.baseUrl, href); + href = cleanUrl$1(this.options.sanitize, this.options.baseUrl, href); if (href === null) { return text; @@ -2101,7 +2064,7 @@ }; _proto.image = function image(href, title, text) { - href = cleanUrl(this.options.sanitize, this.options.baseUrl, href); + href = cleanUrl$1(this.options.sanitize, this.options.baseUrl, href); if (href === null) { return text; @@ -2128,7 +2091,6 @@ * TextRenderer * returns only the textual part of the token */ - var TextRenderer_1 = /*#__PURE__*/function () { function TextRenderer() {} @@ -2177,7 +2139,6 @@ /** * Slugger generates header id */ - var Slugger_1 = /*#__PURE__*/function () { function Slugger() { this.seen = {}; @@ -2234,23 +2195,20 @@ return Slugger; }(); - var Renderer$1 = Renderer_1; - var TextRenderer$1 = TextRenderer_1; - var Slugger$1 = Slugger_1; - var defaults$1 = defaults$5.exports.defaults; - var unescape = helpers.unescape; + var defaults$4 = defaults.defaults; + var unescape$1 = helpers.unescape; /** * Parsing & Compiling */ var Parser_1 = /*#__PURE__*/function () { function Parser(options) { - this.options = options || defaults$1; - this.options.renderer = this.options.renderer || new Renderer$1(); + this.options = options || defaults$4; + this.options.renderer = this.options.renderer || new Renderer_1(); this.renderer = this.options.renderer; this.renderer.options = this.options; - this.textRenderer = new TextRenderer$1(); - this.slugger = new Slugger$1(); + this.textRenderer = new TextRenderer_1(); + this.slugger = new Slugger_1(); } /** * Static Parse Method @@ -2308,7 +2266,9 @@ token = tokens[i]; // Run any renderer extensions if 
(this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) { - ret = this.options.extensions.renderers[token.type].call(this, token); + ret = this.options.extensions.renderers[token.type].call({ + parser: this + }, token); if (ret !== false || !['space', 'hr', 'heading', 'code', 'table', 'blockquote', 'list', 'html', 'paragraph', 'text'].includes(token.type)) { out += ret || ''; @@ -2330,7 +2290,7 @@ case 'heading': { - out += this.renderer.heading(this.parseInline(token.tokens), token.depth, unescape(this.parseInline(token.tokens, this.textRenderer)), this.slugger); + out += this.renderer.heading(this.parseInline(token.tokens), token.depth, unescape$1(this.parseInline(token.tokens, this.textRenderer)), this.slugger); continue; } @@ -2345,10 +2305,10 @@ header = ''; // header cell = ''; - l2 = token.header.length; + l2 = token.header.text.length; for (j = 0; j < l2; j++) { - cell += this.renderer.tablecell(this.parseInline(token.tokens.header[j]), { + cell += this.renderer.tablecell(this.parseInline(token.header.tokens[j]), { header: true, align: token.align[j] }); @@ -2356,10 +2316,10 @@ header += this.renderer.tablerow(cell); body = ''; - l2 = token.cells.length; + l2 = token.cells.text.length; for (j = 0; j < l2; j++) { - row = token.tokens.cells[j]; + row = token.cells.tokens[j]; cell = ''; l3 = row.length; @@ -2486,7 +2446,9 @@ token = tokens[i]; // Run any renderer extensions if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) { - ret = this.options.extensions.renderers[token.type].call(this, token); + ret = this.options.extensions.renderers[token.type].call({ + parser: this + }, token); if (ret !== false || !['escape', 'html', 'link', 'image', 'strong', 'em', 'codespan', 'br', 'del', 'text'].includes(token.type)) { out += ret || ''; @@ -2575,18 +2537,12 @@ return Parser; }(); - var Lexer = Lexer_1; - var Parser = Parser_1; - var Tokenizer = Tokenizer_1; - var Renderer = Renderer_1; - var TextRenderer = TextRenderer_1; - var Slugger = Slugger_1; - var merge = helpers.merge, - checkSanitizeDeprecation = helpers.checkSanitizeDeprecation, - escape = helpers.escape; - var getDefaults = defaults$5.exports.getDefaults, - changeDefaults = defaults$5.exports.changeDefaults, - defaults = defaults$5.exports.defaults; + var merge$2 = helpers.merge, + checkSanitizeDeprecation$1 = helpers.checkSanitizeDeprecation, + escape$2 = helpers.escape; + var getDefaults = defaults.getDefaults, + changeDefaults = defaults.changeDefaults, + defaults$5 = defaults.defaults; /** * Marked */ @@ -2606,15 +2562,15 @@ opt = null; } - opt = merge({}, marked.defaults, opt || {}); - checkSanitizeDeprecation(opt); + opt = merge$2({}, marked.defaults, opt || {}); + checkSanitizeDeprecation$1(opt); if (callback) { var highlight = opt.highlight; var tokens; try { - tokens = Lexer.lex(src, opt); + tokens = Lexer_1.lex(src, opt); } catch (e) { return callback(e); } @@ -2628,7 +2584,7 @@ marked.walkTokens(tokens, opt.walkTokens); } - out = Parser.parse(tokens, opt); + out = Parser_1.parse(tokens, opt); } catch (e) { err = e; } @@ -2677,18 +2633,18 @@ } try { - var _tokens = Lexer.lex(src, opt); + var _tokens = Lexer_1.lex(src, opt); if (opt.walkTokens) { marked.walkTokens(_tokens, opt.walkTokens); } - return Parser.parse(_tokens, opt); + return Parser_1.parse(_tokens, opt); } catch (e) { e.message += '\nPlease report this to https://github.com/markedjs/marked.'; if (opt.silent) { - return '
<p>An error occurred:</p><pre>' + escape(e.message + '', true) + '</pre>'; + return '<p>An error occurred:</p><pre>' + escape$2(e.message + '', true) + '</pre>
'; } throw e; @@ -2700,13 +2656,13 @@ marked.options = marked.setOptions = function (opt) { - merge(marked.defaults, opt); + merge$2(marked.defaults, opt); changeDefaults(marked.defaults); return marked; }; marked.getDefaults = getDefaults; - marked.defaults = defaults; + marked.defaults = defaults$5; /** * Use Extension */ @@ -2718,7 +2674,7 @@ args[_key] = arguments[_key]; } - var opts = merge.apply(void 0, [{}].concat(args)); + var opts = merge$2.apply(void 0, [{}].concat(args)); var extensions = marked.defaults.extensions || { renderers: {}, childTokens: {} @@ -2797,7 +2753,7 @@ if (pack.renderer) { (function () { - var renderer = marked.defaults.renderer || new Renderer(); + var renderer = marked.defaults.renderer || new Renderer_1(); var _loop = function _loop(prop) { var prevRenderer = renderer[prop]; // Replace renderer with func to run extension, but fall back if false @@ -2827,7 +2783,7 @@ if (pack.tokenizer) { (function () { - var tokenizer = marked.defaults.tokenizer || new Tokenizer(); + var tokenizer = marked.defaults.tokenizer || new Tokenizer_1(); var _loop2 = function _loop2(prop) { var prevTokenizer = tokenizer[prop]; // Replace tokenizer with func to run extension, but fall back if false @@ -2888,12 +2844,12 @@ switch (token.type) { case 'table': { - for (var _iterator2 = _createForOfIteratorHelperLoose(token.tokens.header), _step2; !(_step2 = _iterator2()).done;) { + for (var _iterator2 = _createForOfIteratorHelperLoose(token.header.tokens), _step2; !(_step2 = _iterator2()).done;) { var cell = _step2.value; marked.walkTokens(cell, callback); } - for (var _iterator3 = _createForOfIteratorHelperLoose(token.tokens.cells), _step3; !(_step3 = _iterator3()).done;) { + for (var _iterator3 = _createForOfIteratorHelperLoose(token.cells.tokens), _step3; !(_step3 = _iterator3()).done;) { var row = _step3.value; for (var _iterator4 = _createForOfIteratorHelperLoose(row), _step4; !(_step4 = _iterator4()).done;) { @@ -2944,22 +2900,22 @@ throw new Error('marked.parseInline(): input parameter is of type ' + Object.prototype.toString.call(src) + ', string expected'); } - opt = merge({}, marked.defaults, opt || {}); - checkSanitizeDeprecation(opt); + opt = merge$2({}, marked.defaults, opt || {}); + checkSanitizeDeprecation$1(opt); try { - var tokens = Lexer.lexInline(src, opt); + var tokens = Lexer_1.lexInline(src, opt); if (opt.walkTokens) { marked.walkTokens(tokens, opt.walkTokens); } - return Parser.parseInline(tokens, opt); + return Parser_1.parseInline(tokens, opt); } catch (e) { e.message += '\nPlease report this to https://github.com/markedjs/marked.'; if (opt.silent) { - return '
<p>An error occurred:</p><pre>' + escape(e.message + '', true) + '</pre>'; + return '<p>An error occurred:</p><pre>' + escape$2(e.message + '', true) + '</pre>
'; } throw e; @@ -2970,14 +2926,14 @@ */ - marked.Parser = Parser; - marked.parser = Parser.parse; - marked.Renderer = Renderer; - marked.TextRenderer = TextRenderer; - marked.Lexer = Lexer; - marked.lexer = Lexer.lex; - marked.Tokenizer = Tokenizer; - marked.Slugger = Slugger; + marked.Parser = Parser_1; + marked.parser = Parser_1.parse; + marked.Renderer = Renderer_1; + marked.TextRenderer = TextRenderer_1; + marked.Lexer = Lexer_1; + marked.lexer = Lexer_1.lex; + marked.Tokenizer = Tokenizer_1; + marked.Slugger = Slugger_1; marked.parse = marked; var marked_1 = marked;
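// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated bundle): one way the reshaped
// table token from this refactor could be consumed. It assumes the layout
// introduced above (header and cells each carrying parallel `text` and
// `tokens` arrays) plus the marked.lexer and walkTokens hooks visible in this
// diff; the require path below is only an example.
var marked = require('./marked.js');

var src = '| a | *b* |\n|---|---|\n| 1 | 2 |';

// Block lexing now builds inline child tokens eagerly (tokenizers call
// this.lexer.inlineTokens directly), so the table token already carries them:
var table = marked.lexer(src)[0];
console.log(table.header.text);        // e.g. [ 'a', '*b*' ]
console.log(table.header.tokens[1]);   // inline tokens of the second header cell
console.log(table.cells.text[0]);      // e.g. [ '1', '2' ]
console.log(table.cells.tokens[0][0]); // inline tokens of the first body cell

// walkTokens descends through header.tokens and cells.tokens, so a callback
// sees every cell's child tokens without any extra traversal code:
var html = marked(src, {
  walkTokens: function (token) {
    if (token.type === 'em') {
      console.log('em found inside a table cell:', token.text);
    }
  }
});
console.log(html);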