|
|
@@ -1,2637 +0,0 @@
|
|
|
-/**
|
|
|
- * marked - a markdown parser
|
|
|
- * Copyright (c) 2011-2021, Christopher Jeffrey. (MIT Licensed)
|
|
|
- * https://github.com/markedjs/marked
|
|
|
- */
|
|
|
-
|
|
|
-/**
|
|
|
- * DO NOT EDIT THIS FILE
|
|
|
- * The code in this file is generated from files in ./src/
|
|
|
- */
|
|
|
-
|
|
|
-var defaults$5 = {exports: {}};
|
|
|
-
|
|
|
-function getDefaults$1() {
|
|
|
- return {
|
|
|
- baseUrl: null,
|
|
|
- breaks: false,
|
|
|
- gfm: true,
|
|
|
- headerIds: true,
|
|
|
- headerPrefix: '',
|
|
|
- highlight: null,
|
|
|
- langPrefix: 'language-',
|
|
|
- mangle: true,
|
|
|
- pedantic: false,
|
|
|
- renderer: null,
|
|
|
- sanitize: false,
|
|
|
- sanitizer: null,
|
|
|
- silent: false,
|
|
|
- smartLists: false,
|
|
|
- smartypants: false,
|
|
|
- tokenizer: null,
|
|
|
- walkTokens: null,
|
|
|
- xhtml: false
|
|
|
- };
|
|
|
-}
|
|
|
-
|
|
|
-function changeDefaults$1(newDefaults) {
|
|
|
- defaults$5.exports.defaults = newDefaults;
|
|
|
-}
|
|
|
-
|
|
|
-defaults$5.exports = {
|
|
|
- defaults: getDefaults$1(),
|
|
|
- getDefaults: getDefaults$1,
|
|
|
- changeDefaults: changeDefaults$1
|
|
|
-};
|
|
|
-
|
|
|
-/**
|
|
|
- * Helpers
|
|
|
- */
|
|
|
-
|
|
|
-const escapeTest = /[&<>"']/;
|
|
|
-const escapeReplace = /[&<>"']/g;
|
|
|
-const escapeTestNoEncode = /[<>"']|&(?!#?\w+;)/;
|
|
|
-const escapeReplaceNoEncode = /[<>"']|&(?!#?\w+;)/g;
|
|
|
-const escapeReplacements = {
|
|
|
- '&': '&',
|
|
|
- '<': '<',
|
|
|
- '>': '>',
|
|
|
- '"': '"',
|
|
|
- "'": '''
|
|
|
-};
|
|
|
-const getEscapeReplacement = (ch) => escapeReplacements[ch];
|
|
|
-function escape$3(html, encode) {
|
|
|
- if (encode) {
|
|
|
- if (escapeTest.test(html)) {
|
|
|
- return html.replace(escapeReplace, getEscapeReplacement);
|
|
|
- }
|
|
|
- } else {
|
|
|
- if (escapeTestNoEncode.test(html)) {
|
|
|
- return html.replace(escapeReplaceNoEncode, getEscapeReplacement);
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- return html;
|
|
|
-}
|
|
|
-
|
|
|
-const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;
|
|
|
-
|
|
|
-function unescape$1(html) {
|
|
|
- // explicitly match decimal, hex, and named HTML entities
|
|
|
- return html.replace(unescapeTest, (_, n) => {
|
|
|
- n = n.toLowerCase();
|
|
|
- if (n === 'colon') return ':';
|
|
|
- if (n.charAt(0) === '#') {
|
|
|
- return n.charAt(1) === 'x'
|
|
|
- ? String.fromCharCode(parseInt(n.substring(2), 16))
|
|
|
- : String.fromCharCode(+n.substring(1));
|
|
|
- }
|
|
|
- return '';
|
|
|
- });
|
|
|
-}
|
|
|
-
|
|
|
-const caret = /(^|[^\[])\^/g;
|
|
|
-function edit$1(regex, opt) {
|
|
|
- regex = regex.source || regex;
|
|
|
- opt = opt || '';
|
|
|
- const obj = {
|
|
|
- replace: (name, val) => {
|
|
|
- val = val.source || val;
|
|
|
- val = val.replace(caret, '$1');
|
|
|
- regex = regex.replace(name, val);
|
|
|
- return obj;
|
|
|
- },
|
|
|
- getRegex: () => {
|
|
|
- return new RegExp(regex, opt);
|
|
|
- }
|
|
|
- };
|
|
|
- return obj;
|
|
|
-}
|
|
|
-
|
|
|
-const nonWordAndColonTest = /[^\w:]/g;
|
|
|
-const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
|
|
|
-function cleanUrl$1(sanitize, base, href) {
|
|
|
- if (sanitize) {
|
|
|
- let prot;
|
|
|
- try {
|
|
|
- prot = decodeURIComponent(unescape$1(href))
|
|
|
- .replace(nonWordAndColonTest, '')
|
|
|
- .toLowerCase();
|
|
|
- } catch (e) {
|
|
|
- return null;
|
|
|
- }
|
|
|
- if (prot.indexOf('javascript:') === 0 || prot.indexOf('vbscript:') === 0 || prot.indexOf('data:') === 0) {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- if (base && !originIndependentUrl.test(href)) {
|
|
|
- href = resolveUrl(base, href);
|
|
|
- }
|
|
|
- try {
|
|
|
- href = encodeURI(href).replace(/%25/g, '%');
|
|
|
- } catch (e) {
|
|
|
- return null;
|
|
|
- }
|
|
|
- return href;
|
|
|
-}
|
|
|
-
|
|
|
-const baseUrls = {};
|
|
|
-const justDomain = /^[^:]+:\/*[^/]*$/;
|
|
|
-const protocol = /^([^:]+:)[\s\S]*$/;
|
|
|
-const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/;
|
|
|
-
|
|
|
-function resolveUrl(base, href) {
|
|
|
- if (!baseUrls[' ' + base]) {
|
|
|
- // we can ignore everything in base after the last slash of its path component,
|
|
|
- // but we might need to add _that_
|
|
|
- // https://tools.ietf.org/html/rfc3986#section-3
|
|
|
- if (justDomain.test(base)) {
|
|
|
- baseUrls[' ' + base] = base + '/';
|
|
|
- } else {
|
|
|
- baseUrls[' ' + base] = rtrim$1(base, '/', true);
|
|
|
- }
|
|
|
- }
|
|
|
- base = baseUrls[' ' + base];
|
|
|
- const relativeBase = base.indexOf(':') === -1;
|
|
|
-
|
|
|
- if (href.substring(0, 2) === '//') {
|
|
|
- if (relativeBase) {
|
|
|
- return href;
|
|
|
- }
|
|
|
- return base.replace(protocol, '$1') + href;
|
|
|
- } else if (href.charAt(0) === '/') {
|
|
|
- if (relativeBase) {
|
|
|
- return href;
|
|
|
- }
|
|
|
- return base.replace(domain, '$1') + href;
|
|
|
- } else {
|
|
|
- return base + href;
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-const noopTest$1 = { exec: function noopTest() {} };
|
|
|
-
|
|
|
-function merge$2(obj) {
|
|
|
- let i = 1,
|
|
|
- target,
|
|
|
- key;
|
|
|
-
|
|
|
- for (; i < arguments.length; i++) {
|
|
|
- target = arguments[i];
|
|
|
- for (key in target) {
|
|
|
- if (Object.prototype.hasOwnProperty.call(target, key)) {
|
|
|
- obj[key] = target[key];
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- return obj;
|
|
|
-}
|
|
|
-
|
|
|
-function splitCells$1(tableRow, count) {
|
|
|
- // ensure that every cell-delimiting pipe has a space
|
|
|
- // before it to distinguish it from an escaped pipe
|
|
|
- const row = tableRow.replace(/\|/g, (match, offset, str) => {
|
|
|
- let escaped = false,
|
|
|
- curr = offset;
|
|
|
- while (--curr >= 0 && str[curr] === '\\') escaped = !escaped;
|
|
|
- if (escaped) {
|
|
|
- // odd number of slashes means | is escaped
|
|
|
- // so we leave it alone
|
|
|
- return '|';
|
|
|
- } else {
|
|
|
- // add space before unescaped |
|
|
|
- return ' |';
|
|
|
- }
|
|
|
- }),
|
|
|
- cells = row.split(/ \|/);
|
|
|
- let i = 0;
|
|
|
-
|
|
|
- if (cells.length > count) {
|
|
|
- cells.splice(count);
|
|
|
- } else {
|
|
|
- while (cells.length < count) cells.push('');
|
|
|
- }
|
|
|
-
|
|
|
- for (; i < cells.length; i++) {
|
|
|
- // leading or trailing whitespace is ignored per the gfm spec
|
|
|
- cells[i] = cells[i].trim().replace(/\\\|/g, '|');
|
|
|
- }
|
|
|
- return cells;
|
|
|
-}
|
|
|
-
|
|
|
-// Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').
|
|
|
-// /c*$/ is vulnerable to REDOS.
|
|
|
-// invert: Remove suffix of non-c chars instead. Default falsey.
|
|
|
-function rtrim$1(str, c, invert) {
|
|
|
- const l = str.length;
|
|
|
- if (l === 0) {
|
|
|
- return '';
|
|
|
- }
|
|
|
-
|
|
|
- // Length of suffix matching the invert condition.
|
|
|
- let suffLen = 0;
|
|
|
-
|
|
|
- // Step left until we fail to match the invert condition.
|
|
|
- while (suffLen < l) {
|
|
|
- const currChar = str.charAt(l - suffLen - 1);
|
|
|
- if (currChar === c && !invert) {
|
|
|
- suffLen++;
|
|
|
- } else if (currChar !== c && invert) {
|
|
|
- suffLen++;
|
|
|
- } else {
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- return str.substr(0, l - suffLen);
|
|
|
-}
|
|
|
-
|
|
|
-function findClosingBracket$1(str, b) {
|
|
|
- if (str.indexOf(b[1]) === -1) {
|
|
|
- return -1;
|
|
|
- }
|
|
|
- const l = str.length;
|
|
|
- let level = 0,
|
|
|
- i = 0;
|
|
|
- for (; i < l; i++) {
|
|
|
- if (str[i] === '\\') {
|
|
|
- i++;
|
|
|
- } else if (str[i] === b[0]) {
|
|
|
- level++;
|
|
|
- } else if (str[i] === b[1]) {
|
|
|
- level--;
|
|
|
- if (level < 0) {
|
|
|
- return i;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- return -1;
|
|
|
-}
|
|
|
-
|
|
|
-function checkSanitizeDeprecation$1(opt) {
|
|
|
- if (opt && opt.sanitize && !opt.silent) {
|
|
|
- console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options');
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-// copied from https://stackoverflow.com/a/5450113/806777
|
|
|
-function repeatString$1(pattern, count) {
|
|
|
- if (count < 1) {
|
|
|
- return '';
|
|
|
- }
|
|
|
- let result = '';
|
|
|
- while (count > 1) {
|
|
|
- if (count & 1) {
|
|
|
- result += pattern;
|
|
|
- }
|
|
|
- count >>= 1;
|
|
|
- pattern += pattern;
|
|
|
- }
|
|
|
- return result + pattern;
|
|
|
-}
|
|
|
-
|
|
|
-var helpers = {
|
|
|
- escape: escape$3,
|
|
|
- unescape: unescape$1,
|
|
|
- edit: edit$1,
|
|
|
- cleanUrl: cleanUrl$1,
|
|
|
- resolveUrl,
|
|
|
- noopTest: noopTest$1,
|
|
|
- merge: merge$2,
|
|
|
- splitCells: splitCells$1,
|
|
|
- rtrim: rtrim$1,
|
|
|
- findClosingBracket: findClosingBracket$1,
|
|
|
- checkSanitizeDeprecation: checkSanitizeDeprecation$1,
|
|
|
- repeatString: repeatString$1
|
|
|
-};
|
|
|
-
|
|
|
-const { defaults: defaults$4 } = defaults$5.exports;
|
|
|
-const {
|
|
|
- rtrim,
|
|
|
- splitCells,
|
|
|
- escape: escape$2,
|
|
|
- findClosingBracket
|
|
|
-} = helpers;
|
|
|
-
|
|
|
-function outputLink(cap, link, raw) {
|
|
|
- const href = link.href;
|
|
|
- const title = link.title ? escape$2(link.title) : null;
|
|
|
- const text = cap[1].replace(/\\([\[\]])/g, '$1');
|
|
|
-
|
|
|
- if (cap[0].charAt(0) !== '!') {
|
|
|
- return {
|
|
|
- type: 'link',
|
|
|
- raw,
|
|
|
- href,
|
|
|
- title,
|
|
|
- text
|
|
|
- };
|
|
|
- } else {
|
|
|
- return {
|
|
|
- type: 'image',
|
|
|
- raw,
|
|
|
- href,
|
|
|
- title,
|
|
|
- text: escape$2(text)
|
|
|
- };
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-function indentCodeCompensation(raw, text) {
|
|
|
- const matchIndentToCode = raw.match(/^(\s+)(?:```)/);
|
|
|
-
|
|
|
- if (matchIndentToCode === null) {
|
|
|
- return text;
|
|
|
- }
|
|
|
-
|
|
|
- const indentToCode = matchIndentToCode[1];
|
|
|
-
|
|
|
- return text
|
|
|
- .split('\n')
|
|
|
- .map(node => {
|
|
|
- const matchIndentInNode = node.match(/^\s+/);
|
|
|
- if (matchIndentInNode === null) {
|
|
|
- return node;
|
|
|
- }
|
|
|
-
|
|
|
- const [indentInNode] = matchIndentInNode;
|
|
|
-
|
|
|
- if (indentInNode.length >= indentToCode.length) {
|
|
|
- return node.slice(indentToCode.length);
|
|
|
- }
|
|
|
-
|
|
|
- return node;
|
|
|
- })
|
|
|
- .join('\n');
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * Tokenizer
|
|
|
- */
|
|
|
-var Tokenizer_1 = class Tokenizer {
|
|
|
- constructor(options) {
|
|
|
- this.options = options || defaults$4;
|
|
|
- }
|
|
|
-
|
|
|
- space(src) {
|
|
|
- const cap = this.rules.block.newline.exec(src);
|
|
|
- if (cap) {
|
|
|
- if (cap[0].length > 1) {
|
|
|
- return {
|
|
|
- type: 'space',
|
|
|
- raw: cap[0]
|
|
|
- };
|
|
|
- }
|
|
|
- return { raw: '\n' };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- code(src) {
|
|
|
- const cap = this.rules.block.code.exec(src);
|
|
|
- if (cap) {
|
|
|
- const text = cap[0].replace(/^ {1,4}/gm, '');
|
|
|
- return {
|
|
|
- type: 'code',
|
|
|
- raw: cap[0],
|
|
|
- codeBlockStyle: 'indented',
|
|
|
- text: !this.options.pedantic
|
|
|
- ? rtrim(text, '\n')
|
|
|
- : text
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- fences(src) {
|
|
|
- const cap = this.rules.block.fences.exec(src);
|
|
|
- if (cap) {
|
|
|
- const raw = cap[0];
|
|
|
- const text = indentCodeCompensation(raw, cap[3] || '');
|
|
|
-
|
|
|
- return {
|
|
|
- type: 'code',
|
|
|
- raw,
|
|
|
- lang: cap[2] ? cap[2].trim() : cap[2],
|
|
|
- text
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- heading(src) {
|
|
|
- const cap = this.rules.block.heading.exec(src);
|
|
|
- if (cap) {
|
|
|
- let text = cap[2].trim();
|
|
|
-
|
|
|
- // remove trailing #s
|
|
|
- if (/#$/.test(text)) {
|
|
|
- const trimmed = rtrim(text, '#');
|
|
|
- if (this.options.pedantic) {
|
|
|
- text = trimmed.trim();
|
|
|
- } else if (!trimmed || / $/.test(trimmed)) {
|
|
|
- // CommonMark requires space before trailing #s
|
|
|
- text = trimmed.trim();
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- return {
|
|
|
- type: 'heading',
|
|
|
- raw: cap[0],
|
|
|
- depth: cap[1].length,
|
|
|
- text: text
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- nptable(src) {
|
|
|
- const cap = this.rules.block.nptable.exec(src);
|
|
|
- if (cap) {
|
|
|
- const item = {
|
|
|
- type: 'table',
|
|
|
- header: splitCells(cap[1].replace(/^ *| *\| *$/g, '')),
|
|
|
- align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
|
|
|
- cells: cap[3] ? cap[3].replace(/\n$/, '').split('\n') : [],
|
|
|
- raw: cap[0]
|
|
|
- };
|
|
|
-
|
|
|
- if (item.header.length === item.align.length) {
|
|
|
- let l = item.align.length;
|
|
|
- let i;
|
|
|
- for (i = 0; i < l; i++) {
|
|
|
- if (/^ *-+: *$/.test(item.align[i])) {
|
|
|
- item.align[i] = 'right';
|
|
|
- } else if (/^ *:-+: *$/.test(item.align[i])) {
|
|
|
- item.align[i] = 'center';
|
|
|
- } else if (/^ *:-+ *$/.test(item.align[i])) {
|
|
|
- item.align[i] = 'left';
|
|
|
- } else {
|
|
|
- item.align[i] = null;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- l = item.cells.length;
|
|
|
- for (i = 0; i < l; i++) {
|
|
|
- item.cells[i] = splitCells(item.cells[i], item.header.length);
|
|
|
- }
|
|
|
-
|
|
|
- return item;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- hr(src) {
|
|
|
- const cap = this.rules.block.hr.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: 'hr',
|
|
|
- raw: cap[0]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- blockquote(src) {
|
|
|
- const cap = this.rules.block.blockquote.exec(src);
|
|
|
- if (cap) {
|
|
|
- const text = cap[0].replace(/^ *> ?/gm, '');
|
|
|
-
|
|
|
- return {
|
|
|
- type: 'blockquote',
|
|
|
- raw: cap[0],
|
|
|
- text
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- list(src) {
|
|
|
- const cap = this.rules.block.list.exec(src);
|
|
|
- if (cap) {
|
|
|
- let raw = cap[0];
|
|
|
- const bull = cap[2];
|
|
|
- const isordered = bull.length > 1;
|
|
|
-
|
|
|
- const list = {
|
|
|
- type: 'list',
|
|
|
- raw,
|
|
|
- ordered: isordered,
|
|
|
- start: isordered ? +bull.slice(0, -1) : '',
|
|
|
- loose: false,
|
|
|
- items: []
|
|
|
- };
|
|
|
-
|
|
|
- // Get each top-level item.
|
|
|
- const itemMatch = cap[0].match(this.rules.block.item);
|
|
|
-
|
|
|
- let next = false,
|
|
|
- item,
|
|
|
- space,
|
|
|
- bcurr,
|
|
|
- bnext,
|
|
|
- addBack,
|
|
|
- loose,
|
|
|
- istask,
|
|
|
- ischecked,
|
|
|
- endMatch;
|
|
|
-
|
|
|
- let l = itemMatch.length;
|
|
|
- bcurr = this.rules.block.listItemStart.exec(itemMatch[0]);
|
|
|
- for (let i = 0; i < l; i++) {
|
|
|
- item = itemMatch[i];
|
|
|
- raw = item;
|
|
|
-
|
|
|
- if (!this.options.pedantic) {
|
|
|
- // Determine if current item contains the end of the list
|
|
|
- endMatch = item.match(new RegExp('\\n\\s*\\n {0,' + (bcurr[0].length - 1) + '}\\S'));
|
|
|
- if (endMatch) {
|
|
|
- addBack = item.length - endMatch.index + itemMatch.slice(i + 1).join('\n').length;
|
|
|
- list.raw = list.raw.substring(0, list.raw.length - addBack);
|
|
|
-
|
|
|
- item = item.substring(0, endMatch.index);
|
|
|
- raw = item;
|
|
|
- l = i + 1;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- // Determine whether the next list item belongs here.
|
|
|
- // Backpedal if it does not belong in this list.
|
|
|
- if (i !== l - 1) {
|
|
|
- bnext = this.rules.block.listItemStart.exec(itemMatch[i + 1]);
|
|
|
- if (
|
|
|
- !this.options.pedantic
|
|
|
- ? bnext[1].length >= bcurr[0].length || bnext[1].length > 3
|
|
|
- : bnext[1].length > bcurr[1].length
|
|
|
- ) {
|
|
|
- // nested list or continuation
|
|
|
- itemMatch.splice(i, 2, itemMatch[i] + (!this.options.pedantic && bnext[1].length < bcurr[0].length && !itemMatch[i].match(/\n$/) ? '' : '\n') + itemMatch[i + 1]);
|
|
|
- i--;
|
|
|
- l--;
|
|
|
- continue;
|
|
|
- } else if (
|
|
|
- // different bullet style
|
|
|
- !this.options.pedantic || this.options.smartLists
|
|
|
- ? bnext[2][bnext[2].length - 1] !== bull[bull.length - 1]
|
|
|
- : isordered === (bnext[2].length === 1)
|
|
|
- ) {
|
|
|
- addBack = itemMatch.slice(i + 1).join('\n').length;
|
|
|
- list.raw = list.raw.substring(0, list.raw.length - addBack);
|
|
|
- i = l - 1;
|
|
|
- }
|
|
|
- bcurr = bnext;
|
|
|
- }
|
|
|
-
|
|
|
- // Remove the list item's bullet
|
|
|
- // so it is seen as the next token.
|
|
|
- space = item.length;
|
|
|
- item = item.replace(/^ *([*+-]|\d+[.)]) ?/, '');
|
|
|
-
|
|
|
- // Outdent whatever the
|
|
|
- // list item contains. Hacky.
|
|
|
- if (~item.indexOf('\n ')) {
|
|
|
- space -= item.length;
|
|
|
- item = !this.options.pedantic
|
|
|
- ? item.replace(new RegExp('^ {1,' + space + '}', 'gm'), '')
|
|
|
- : item.replace(/^ {1,4}/gm, '');
|
|
|
- }
|
|
|
-
|
|
|
- // trim item newlines at end
|
|
|
- item = rtrim(item, '\n');
|
|
|
- if (i !== l - 1) {
|
|
|
- raw = raw + '\n';
|
|
|
- }
|
|
|
-
|
|
|
- // Determine whether item is loose or not.
|
|
|
- // Use: /(^|\n)(?! )[^\n]+\n\n(?!\s*$)/
|
|
|
- // for discount behavior.
|
|
|
- loose = next || /\n\n(?!\s*$)/.test(raw);
|
|
|
- if (i !== l - 1) {
|
|
|
- next = raw.slice(-2) === '\n\n';
|
|
|
- if (!loose) loose = next;
|
|
|
- }
|
|
|
-
|
|
|
- if (loose) {
|
|
|
- list.loose = true;
|
|
|
- }
|
|
|
-
|
|
|
- // Check for task list items
|
|
|
- if (this.options.gfm) {
|
|
|
- istask = /^\[[ xX]\] /.test(item);
|
|
|
- ischecked = undefined;
|
|
|
- if (istask) {
|
|
|
- ischecked = item[1] !== ' ';
|
|
|
- item = item.replace(/^\[[ xX]\] +/, '');
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- list.items.push({
|
|
|
- type: 'list_item',
|
|
|
- raw,
|
|
|
- task: istask,
|
|
|
- checked: ischecked,
|
|
|
- loose: loose,
|
|
|
- text: item
|
|
|
- });
|
|
|
- }
|
|
|
-
|
|
|
- return list;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- html(src) {
|
|
|
- const cap = this.rules.block.html.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: this.options.sanitize
|
|
|
- ? 'paragraph'
|
|
|
- : 'html',
|
|
|
- raw: cap[0],
|
|
|
- pre: !this.options.sanitizer
|
|
|
- && (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'),
|
|
|
- text: this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$2(cap[0])) : cap[0]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- def(src) {
|
|
|
- const cap = this.rules.block.def.exec(src);
|
|
|
- if (cap) {
|
|
|
- if (cap[3]) cap[3] = cap[3].substring(1, cap[3].length - 1);
|
|
|
- const tag = cap[1].toLowerCase().replace(/\s+/g, ' ');
|
|
|
- return {
|
|
|
- type: 'def',
|
|
|
- tag,
|
|
|
- raw: cap[0],
|
|
|
- href: cap[2],
|
|
|
- title: cap[3]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- table(src) {
|
|
|
- const cap = this.rules.block.table.exec(src);
|
|
|
- if (cap) {
|
|
|
- const item = {
|
|
|
- type: 'table',
|
|
|
- header: splitCells(cap[1].replace(/^ *| *\| *$/g, '')),
|
|
|
- align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
|
|
|
- cells: cap[3] ? cap[3].replace(/\n$/, '').split('\n') : []
|
|
|
- };
|
|
|
-
|
|
|
- if (item.header.length === item.align.length) {
|
|
|
- item.raw = cap[0];
|
|
|
-
|
|
|
- let l = item.align.length;
|
|
|
- let i;
|
|
|
- for (i = 0; i < l; i++) {
|
|
|
- if (/^ *-+: *$/.test(item.align[i])) {
|
|
|
- item.align[i] = 'right';
|
|
|
- } else if (/^ *:-+: *$/.test(item.align[i])) {
|
|
|
- item.align[i] = 'center';
|
|
|
- } else if (/^ *:-+ *$/.test(item.align[i])) {
|
|
|
- item.align[i] = 'left';
|
|
|
- } else {
|
|
|
- item.align[i] = null;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- l = item.cells.length;
|
|
|
- for (i = 0; i < l; i++) {
|
|
|
- item.cells[i] = splitCells(
|
|
|
- item.cells[i].replace(/^ *\| *| *\| *$/g, ''),
|
|
|
- item.header.length);
|
|
|
- }
|
|
|
-
|
|
|
- return item;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- lheading(src) {
|
|
|
- const cap = this.rules.block.lheading.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: 'heading',
|
|
|
- raw: cap[0],
|
|
|
- depth: cap[2].charAt(0) === '=' ? 1 : 2,
|
|
|
- text: cap[1]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- paragraph(src) {
|
|
|
- const cap = this.rules.block.paragraph.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: 'paragraph',
|
|
|
- raw: cap[0],
|
|
|
- text: cap[1].charAt(cap[1].length - 1) === '\n'
|
|
|
- ? cap[1].slice(0, -1)
|
|
|
- : cap[1]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- text(src) {
|
|
|
- const cap = this.rules.block.text.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: 'text',
|
|
|
- raw: cap[0],
|
|
|
- text: cap[0]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- escape(src) {
|
|
|
- const cap = this.rules.inline.escape.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: 'escape',
|
|
|
- raw: cap[0],
|
|
|
- text: escape$2(cap[1])
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- tag(src, inLink, inRawBlock) {
|
|
|
- const cap = this.rules.inline.tag.exec(src);
|
|
|
- if (cap) {
|
|
|
- if (!inLink && /^<a /i.test(cap[0])) {
|
|
|
- inLink = true;
|
|
|
- } else if (inLink && /^<\/a>/i.test(cap[0])) {
|
|
|
- inLink = false;
|
|
|
- }
|
|
|
- if (!inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
|
|
|
- inRawBlock = true;
|
|
|
- } else if (inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
|
|
|
- inRawBlock = false;
|
|
|
- }
|
|
|
-
|
|
|
- return {
|
|
|
- type: this.options.sanitize
|
|
|
- ? 'text'
|
|
|
- : 'html',
|
|
|
- raw: cap[0],
|
|
|
- inLink,
|
|
|
- inRawBlock,
|
|
|
- text: this.options.sanitize
|
|
|
- ? (this.options.sanitizer
|
|
|
- ? this.options.sanitizer(cap[0])
|
|
|
- : escape$2(cap[0]))
|
|
|
- : cap[0]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- link(src) {
|
|
|
- const cap = this.rules.inline.link.exec(src);
|
|
|
- if (cap) {
|
|
|
- const trimmedUrl = cap[2].trim();
|
|
|
- if (!this.options.pedantic && /^</.test(trimmedUrl)) {
|
|
|
- // commonmark requires matching angle brackets
|
|
|
- if (!(/>$/.test(trimmedUrl))) {
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- // ending angle bracket cannot be escaped
|
|
|
- const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\');
|
|
|
- if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {
|
|
|
- return;
|
|
|
- }
|
|
|
- } else {
|
|
|
- // find closing parenthesis
|
|
|
- const lastParenIndex = findClosingBracket(cap[2], '()');
|
|
|
- if (lastParenIndex > -1) {
|
|
|
- const start = cap[0].indexOf('!') === 0 ? 5 : 4;
|
|
|
- const linkLen = start + cap[1].length + lastParenIndex;
|
|
|
- cap[2] = cap[2].substring(0, lastParenIndex);
|
|
|
- cap[0] = cap[0].substring(0, linkLen).trim();
|
|
|
- cap[3] = '';
|
|
|
- }
|
|
|
- }
|
|
|
- let href = cap[2];
|
|
|
- let title = '';
|
|
|
- if (this.options.pedantic) {
|
|
|
- // split pedantic href and title
|
|
|
- const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(href);
|
|
|
-
|
|
|
- if (link) {
|
|
|
- href = link[1];
|
|
|
- title = link[3];
|
|
|
- }
|
|
|
- } else {
|
|
|
- title = cap[3] ? cap[3].slice(1, -1) : '';
|
|
|
- }
|
|
|
-
|
|
|
- href = href.trim();
|
|
|
- if (/^</.test(href)) {
|
|
|
- if (this.options.pedantic && !(/>$/.test(trimmedUrl))) {
|
|
|
- // pedantic allows starting angle bracket without ending angle bracket
|
|
|
- href = href.slice(1);
|
|
|
- } else {
|
|
|
- href = href.slice(1, -1);
|
|
|
- }
|
|
|
- }
|
|
|
- return outputLink(cap, {
|
|
|
- href: href ? href.replace(this.rules.inline._escapes, '$1') : href,
|
|
|
- title: title ? title.replace(this.rules.inline._escapes, '$1') : title
|
|
|
- }, cap[0]);
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- reflink(src, links) {
|
|
|
- let cap;
|
|
|
- if ((cap = this.rules.inline.reflink.exec(src))
|
|
|
- || (cap = this.rules.inline.nolink.exec(src))) {
|
|
|
- let link = (cap[2] || cap[1]).replace(/\s+/g, ' ');
|
|
|
- link = links[link.toLowerCase()];
|
|
|
- if (!link || !link.href) {
|
|
|
- const text = cap[0].charAt(0);
|
|
|
- return {
|
|
|
- type: 'text',
|
|
|
- raw: text,
|
|
|
- text
|
|
|
- };
|
|
|
- }
|
|
|
- return outputLink(cap, link, cap[0]);
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- emStrong(src, maskedSrc, prevChar = '') {
|
|
|
- let match = this.rules.inline.emStrong.lDelim.exec(src);
|
|
|
- if (!match) return;
|
|
|
-
|
|
|
- if (match[3] && prevChar.match(/[\p{L}\p{N}]/u)) return; // _ can't be between two alphanumerics. \p{L}\p{N} includes non-english alphabet/numbers as well
|
|
|
-
|
|
|
- const nextChar = match[1] || match[2] || '';
|
|
|
-
|
|
|
- if (!nextChar || (nextChar && (prevChar === '' || this.rules.inline.punctuation.exec(prevChar)))) {
|
|
|
- const lLength = match[0].length - 1;
|
|
|
- let rDelim, rLength, delimTotal = lLength, midDelimTotal = 0;
|
|
|
-
|
|
|
- const endReg = match[0][0] === '*' ? this.rules.inline.emStrong.rDelimAst : this.rules.inline.emStrong.rDelimUnd;
|
|
|
- endReg.lastIndex = 0;
|
|
|
-
|
|
|
- maskedSrc = maskedSrc.slice(-1 * src.length + lLength); // Bump maskedSrc to same section of string as src (move to lexer?)
|
|
|
-
|
|
|
- while ((match = endReg.exec(maskedSrc)) != null) {
|
|
|
- rDelim = match[1] || match[2] || match[3] || match[4] || match[5] || match[6];
|
|
|
-
|
|
|
- if (!rDelim) continue; // matched the first alternative in rules.js (skip the * in __abc*abc__)
|
|
|
-
|
|
|
- rLength = rDelim.length;
|
|
|
-
|
|
|
- if (match[3] || match[4]) { // found another Left Delim
|
|
|
- delimTotal += rLength;
|
|
|
- continue;
|
|
|
- } else if (match[5] || match[6]) { // either Left or Right Delim
|
|
|
- if (lLength % 3 && !((lLength + rLength) % 3)) {
|
|
|
- midDelimTotal += rLength;
|
|
|
- continue; // CommonMark Emphasis Rules 9-10
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- delimTotal -= rLength;
|
|
|
-
|
|
|
- if (delimTotal > 0) continue; // Haven't found enough closing delimiters
|
|
|
-
|
|
|
- // If this is the last rDelimiter, remove extra characters. *a*** -> *a*
|
|
|
- if (delimTotal + midDelimTotal - rLength <= 0 && !maskedSrc.slice(endReg.lastIndex).match(endReg)) {
|
|
|
- rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal);
|
|
|
- }
|
|
|
-
|
|
|
- if (Math.min(lLength, rLength) % 2) {
|
|
|
- return {
|
|
|
- type: 'em',
|
|
|
- raw: src.slice(0, lLength + match.index + rLength + 1),
|
|
|
- text: src.slice(1, lLength + match.index + rLength)
|
|
|
- };
|
|
|
- }
|
|
|
- if (Math.min(lLength, rLength) % 2 === 0) {
|
|
|
- return {
|
|
|
- type: 'strong',
|
|
|
- raw: src.slice(0, lLength + match.index + rLength + 1),
|
|
|
- text: src.slice(2, lLength + match.index + rLength - 1)
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- codespan(src) {
|
|
|
- const cap = this.rules.inline.code.exec(src);
|
|
|
- if (cap) {
|
|
|
- let text = cap[2].replace(/\n/g, ' ');
|
|
|
- const hasNonSpaceChars = /[^ ]/.test(text);
|
|
|
- const hasSpaceCharsOnBothEnds = /^ /.test(text) && / $/.test(text);
|
|
|
- if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
|
|
|
- text = text.substring(1, text.length - 1);
|
|
|
- }
|
|
|
- text = escape$2(text, true);
|
|
|
- return {
|
|
|
- type: 'codespan',
|
|
|
- raw: cap[0],
|
|
|
- text
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- br(src) {
|
|
|
- const cap = this.rules.inline.br.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: 'br',
|
|
|
- raw: cap[0]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- del(src) {
|
|
|
- const cap = this.rules.inline.del.exec(src);
|
|
|
- if (cap) {
|
|
|
- return {
|
|
|
- type: 'del',
|
|
|
- raw: cap[0],
|
|
|
- text: cap[2]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- autolink(src, mangle) {
|
|
|
- const cap = this.rules.inline.autolink.exec(src);
|
|
|
- if (cap) {
|
|
|
- let text, href;
|
|
|
- if (cap[2] === '@') {
|
|
|
- text = escape$2(this.options.mangle ? mangle(cap[1]) : cap[1]);
|
|
|
- href = 'mailto:' + text;
|
|
|
- } else {
|
|
|
- text = escape$2(cap[1]);
|
|
|
- href = text;
|
|
|
- }
|
|
|
-
|
|
|
- return {
|
|
|
- type: 'link',
|
|
|
- raw: cap[0],
|
|
|
- text,
|
|
|
- href,
|
|
|
- tokens: [
|
|
|
- {
|
|
|
- type: 'text',
|
|
|
- raw: text,
|
|
|
- text
|
|
|
- }
|
|
|
- ]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- url(src, mangle) {
|
|
|
- let cap;
|
|
|
- if (cap = this.rules.inline.url.exec(src)) {
|
|
|
- let text, href;
|
|
|
- if (cap[2] === '@') {
|
|
|
- text = escape$2(this.options.mangle ? mangle(cap[0]) : cap[0]);
|
|
|
- href = 'mailto:' + text;
|
|
|
- } else {
|
|
|
- // do extended autolink path validation
|
|
|
- let prevCapZero;
|
|
|
- do {
|
|
|
- prevCapZero = cap[0];
|
|
|
- cap[0] = this.rules.inline._backpedal.exec(cap[0])[0];
|
|
|
- } while (prevCapZero !== cap[0]);
|
|
|
- text = escape$2(cap[0]);
|
|
|
- if (cap[1] === 'www.') {
|
|
|
- href = 'http://' + text;
|
|
|
- } else {
|
|
|
- href = text;
|
|
|
- }
|
|
|
- }
|
|
|
- return {
|
|
|
- type: 'link',
|
|
|
- raw: cap[0],
|
|
|
- text,
|
|
|
- href,
|
|
|
- tokens: [
|
|
|
- {
|
|
|
- type: 'text',
|
|
|
- raw: text,
|
|
|
- text
|
|
|
- }
|
|
|
- ]
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- inlineText(src, inRawBlock, smartypants) {
|
|
|
- const cap = this.rules.inline.text.exec(src);
|
|
|
- if (cap) {
|
|
|
- let text;
|
|
|
- if (inRawBlock) {
|
|
|
- text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape$2(cap[0])) : cap[0];
|
|
|
- } else {
|
|
|
- text = escape$2(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
|
|
|
- }
|
|
|
- return {
|
|
|
- type: 'text',
|
|
|
- raw: cap[0],
|
|
|
- text
|
|
|
- };
|
|
|
- }
|
|
|
- }
|
|
|
-};
|
|
|
-
|
|
|
-const {
|
|
|
- noopTest,
|
|
|
- edit,
|
|
|
- merge: merge$1
|
|
|
-} = helpers;
|
|
|
-
|
|
|
-/**
|
|
|
- * Block-Level Grammar
|
|
|
- */
|
|
|
-const block$1 = {
|
|
|
- newline: /^(?: *(?:\n|$))+/,
|
|
|
- code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
|
|
|
- fences: /^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?:\n+|$)|$)/,
|
|
|
- hr: /^ {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)/,
|
|
|
- heading: /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,
|
|
|
- blockquote: /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,
|
|
|
- list: /^( {0,3})(bull) [\s\S]+?(?:hr|def|\n{2,}(?! )(?! {0,3}bull )\n*|\s*$)/,
|
|
|
- html: '^ {0,3}(?:' // optional indentation
|
|
|
- + '<(script|pre|style)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
|
|
|
- + '|comment[^\\n]*(\\n+|$)' // (2)
|
|
|
- + '|<\\?[\\s\\S]*?(?:\\?>\\n*|$)' // (3)
|
|
|
- + '|<![A-Z][\\s\\S]*?(?:>\\n*|$)' // (4)
|
|
|
- + '|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)' // (5)
|
|
|
- + '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (6)
|
|
|
- + '|<(?!script|pre|style)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) open tag
|
|
|
- + '|</(?!script|pre|style)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag
|
|
|
- + ')',
|
|
|
- def: /^ {0,3}\[(label)\]: *\n? *<?([^\s>]+)>?(?:(?: +\n? *| *\n *)(title))? *(?:\n+|$)/,
|
|
|
- nptable: noopTest,
|
|
|
- table: noopTest,
|
|
|
- lheading: /^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/,
|
|
|
- // regex template, placeholders will be replaced according to different paragraph
|
|
|
- // interruption rules of commonmark and the original markdown spec:
|
|
|
- _paragraph: /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html| +\n)[^\n]+)*)/,
|
|
|
- text: /^[^\n]+/
|
|
|
-};
|
|
|
-
|
|
|
// ---------------------------------------------------------------------------
// Block grammar assembly.
// `edit()` is a regex-template helper (defined earlier in this file): it takes
// a regex/string with named placeholders and substitutes them via .replace()
// before compiling with .getRegex(). Substitution order below is significant.
// ---------------------------------------------------------------------------

// Link reference definition label: anything but unescaped brackets, non-empty.
block$1._label = /(?!\s*\])(?:\\[\[\]]|[^\[\]])+/;
// Link reference definition title: double-quoted, single-quoted, or parenthesized.
block$1._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
block$1.def = edit(block$1.def)
  .replace('label', block$1._label)
  .replace('title', block$1._title)
  .getRegex();

// List bullets: -, +, * or an ordered marker of up to 9 digits followed by . or ).
block$1.bullet = /(?:[*+-]|\d{1,9}[.)])/;
block$1.item = /^( *)(bull) ?[^\n]*(?:\n(?! *bull ?)[^\n]*)*/;
block$1.item = edit(block$1.item, 'gm')
  .replace(/bull/g, block$1.bullet)
  .getRegex();

block$1.listItemStart = edit(/^( *)(bull) */)
  .replace('bull', block$1.bullet)
  .getRegex();

block$1.list = edit(block$1.list)
  .replace(/bull/g, block$1.bullet)
  // A list is terminated by a following hr or link definition.
  .replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))')
  .replace('def', '\\n+(?=' + block$1.def.source + ')')
  .getRegex();

// HTML block-level tag names (type (6) blocks per the CommonMark HTML-block rules).
block$1._tag = 'address|article|aside|base|basefont|blockquote|body|caption'
  + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
  + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
  + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
  + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
  + '|track|ul';
// HTML comment; tolerates an unterminated comment at end of input.
block$1._comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/;
block$1.html = edit(block$1.html, 'i')
  .replace('comment', block$1._comment)
  .replace('tag', block$1._tag)
  .replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/)
  .getRegex();

// Fill the paragraph template with each construct that may interrupt a paragraph.
block$1.paragraph = edit(block$1._paragraph)
  .replace('hr', block$1.hr)
  .replace('heading', ' {0,3}#{1,6} ')
  .replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs
  .replace('blockquote', ' {0,3}>')
  .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
  .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
  .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|!--)')
  .replace('tag', block$1._tag) // pars can be interrupted by type (6) html blocks
  .getRegex();

block$1.blockquote = edit(block$1.blockquote)
  .replace('paragraph', block$1.paragraph)
  .getRegex();

/**
 * Normal Block Grammar
 */

block$1.normal = merge$1({}, block$1);

/**
 * GFM Block Grammar
 */

block$1.gfm = merge$1({}, block$1.normal, {
  nptable: '^ *([^|\\n ].*\\|.*)\\n' // Header
    + ' {0,3}([-:]+ *\\|[-| :]*)' // Align
    + '(?:\\n((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)', // Cells
  table: '^ *\\|(.+)\\n' // Header
    + ' {0,3}\\|?( *[-:]+[-| :]*)' // Align
    + '(?:\\n *((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
});

block$1.gfm.nptable = edit(block$1.gfm.nptable)
  .replace('hr', block$1.hr)
  .replace('heading', ' {0,3}#{1,6} ')
  .replace('blockquote', ' {0,3}>')
  .replace('code', ' {4}[^\\n]')
  .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
  .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
  .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|!--)')
  .replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks
  .getRegex();

block$1.gfm.table = edit(block$1.gfm.table)
  .replace('hr', block$1.hr)
  .replace('heading', ' {0,3}#{1,6} ')
  .replace('blockquote', ' {0,3}>')
  .replace('code', ' {4}[^\\n]')
  .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
  .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
  .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|!--)')
  .replace('tag', block$1._tag) // tables can be interrupted by type (6) html blocks
  .getRegex();

/**
 * Pedantic grammar (original John Gruber's loose markdown specification)
 */

block$1.pedantic = merge$1({}, block$1.normal, {
  html: edit(
    '^ *(?:comment *(?:\\n|\\s*$)'
    + '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
    + '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))')
    .replace('comment', block$1._comment)
    // "tag" here expands to a negative lookahead excluding inline-level tags.
    .replace(/tag/g, '(?!(?:'
      + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
      + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
      + '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b')
    .getRegex(),
  def: /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,
  heading: /^(#{1,6})(.*)(?:\n+|$)/,
  fences: noopTest, // fences not supported
  paragraph: edit(block$1.normal._paragraph)
    .replace('hr', block$1.hr)
    .replace('heading', ' *#{1,6} *[^\n]')
    .replace('lheading', block$1.lheading)
    .replace('blockquote', ' {0,3}>')
    .replace('|fences', '')
    .replace('|list', '')
    .replace('|html', '')
    .getRegex()
});

/**
 * Inline-Level Grammar
 */
const inline$1 = {
  escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,
  autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/,
  url: noopTest,
  tag: '^comment'
    + '|^</[a-zA-Z][\\w:-]*\\s*>' // self-closing tag
    + '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
    + '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
    + '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
    + '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>', // CDATA section
  link: /^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/,
  reflink: /^!?\[(label)\]\[(?!\s*\])((?:\\[\[\]]?|[^\[\]\\])+)\]/,
  nolink: /^!?\[(?!\s*\])((?:\[[^\[\]]*\]|\\[\[\]]|[^\[\]])*)\](?:\[\])?/,
  reflinkSearch: 'reflink|nolink(?!\\()',
  emStrong: {
    lDelim: /^(?:\*+(?:([punct_])|[^\s*]))|^_+(?:([punct*])|([^\s_]))/,
    // (1) and (2) can only be a Right Delimiter. (3) and (4) can only be Left. (5) and (6) can be either Left or Right.
    // () Skip other delimiter (1) #*** (2) a***#, a*** (3) #***a, ***a (4) ***# (5) #***# (6) a***a
    rDelimAst: /\_\_[^_]*?\*[^_]*?\_\_|[punct_](\*+)(?=[\s]|$)|[^punct*_\s](\*+)(?=[punct_\s]|$)|[punct_\s](\*+)(?=[^punct*_\s])|[\s](\*+)(?=[punct_])|[punct_](\*+)(?=[punct_])|[^punct*_\s](\*+)(?=[^punct*_\s])/,
    rDelimUnd: /\*\*[^*]*?\_[^*]*?\*\*|[punct*](\_+)(?=[\s]|$)|[^punct*_\s](\_+)(?=[punct*\s]|$)|[punct*\s](\_+)(?=[^punct*_\s])|[\s](\_+)(?=[punct*])|[punct*](\_+)(?=[punct*])/ // ^- Not allowed for _
  },
  code: /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,
  br: /^( {2,}|\\)\n(?!\s*$)/,
  del: noopTest,
  text: /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/,
  punctuation: /^([\spunctuation])/
};

// list of punctuation marks from CommonMark spec
// without * and _ to handle the different emphasis markers * and _
inline$1._punctuation = '!"#$%&\'()+\\-.,/:;<=>?@\\[\\]`^{|}~';
inline$1.punctuation = edit(inline$1.punctuation).replace(/punctuation/g, inline$1._punctuation).getRegex();

// sequences em should skip over [title](link), `code`, <html>
inline$1.blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g;
inline$1.escapedEmSt = /\\\*|\\_/g;

// Inside inline HTML a comment must actually terminate (no "|$" fallback).
inline$1._comment = edit(block$1._comment).replace('(?:-->|$)', '-->').getRegex();

inline$1.emStrong.lDelim = edit(inline$1.emStrong.lDelim)
  .replace(/punct/g, inline$1._punctuation)
  .getRegex();

inline$1.emStrong.rDelimAst = edit(inline$1.emStrong.rDelimAst, 'g')
  .replace(/punct/g, inline$1._punctuation)
  .getRegex();

inline$1.emStrong.rDelimUnd = edit(inline$1.emStrong.rDelimUnd, 'g')
  .replace(/punct/g, inline$1._punctuation)
  .getRegex();

inline$1._escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g;

inline$1._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/;
inline$1._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/;
inline$1.autolink = edit(inline$1.autolink)
  .replace('scheme', inline$1._scheme)
  .replace('email', inline$1._email)
  .getRegex();

inline$1._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/;

inline$1.tag = edit(inline$1.tag)
  .replace('comment', inline$1._comment)
  .replace('attribute', inline$1._attribute)
  .getRegex();

inline$1._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/;
inline$1._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/;
inline$1._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/;

inline$1.link = edit(inline$1.link)
  .replace('label', inline$1._label)
  .replace('href', inline$1._href)
  .replace('title', inline$1._title)
  .getRegex();

inline$1.reflink = edit(inline$1.reflink)
  .replace('label', inline$1._label)
  .getRegex();

inline$1.reflinkSearch = edit(inline$1.reflinkSearch, 'g')
  .replace('reflink', inline$1.reflink)
  .replace('nolink', inline$1.nolink)
  .getRegex();

/**
 * Normal Inline Grammar
 */

inline$1.normal = merge$1({}, inline$1);

/**
 * Pedantic Inline Grammar
 */

inline$1.pedantic = merge$1({}, inline$1.normal, {
  strong: {
    start: /^__|\*\*/,
    middle: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,
    endAst: /\*\*(?!\*)/g,
    endUnd: /__(?!_)/g
  },
  em: {
    start: /^_|\*/,
    middle: /^()\*(?=\S)([\s\S]*?\S)\*(?!\*)|^_(?=\S)([\s\S]*?\S)_(?!_)/,
    endAst: /\*(?!\*)/g,
    endUnd: /_(?!_)/g
  },
  link: edit(/^!?\[(label)\]\((.*?)\)/)
    .replace('label', inline$1._label)
    .getRegex(),
  reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/)
    .replace('label', inline$1._label)
    .getRegex()
});

/**
 * GFM Inline Grammar
 */

inline$1.gfm = merge$1({}, inline$1.normal, {
  escape: edit(inline$1.escape).replace('])', '~|])').getRegex(),
  _extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,
  url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,
  _backpedal: /(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/,
  del: /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,
  text: /^([`~]+|[^`~])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@))|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@))/
});

inline$1.gfm.url = edit(inline$1.gfm.url, 'i')
  .replace('email', inline$1.gfm._extended_email)
  .getRegex();
/**
 * GFM + Line Breaks Inline Grammar
 */

inline$1.breaks = merge$1({}, inline$1.gfm, {
  br: edit(inline$1.br).replace('{2,}', '*').getRegex(),
  text: edit(inline$1.gfm.text)
    .replace('\\b_', '\\b_| {2,}\\n')
    .replace(/\{2,\}/g, '*')
    .getRegex()
});

// Bundle export consumed by the Lexer below.
var rules = {
  block: block$1,
  inline: inline$1
};
|
|
|
-
|
|
|
-const Tokenizer$1 = Tokenizer_1;
|
|
|
-const { defaults: defaults$3 } = defaults$5.exports;
|
|
|
-const { block, inline } = rules;
|
|
|
-const { repeatString } = helpers;
|
|
|
-
|
|
|
/**
 * smartypants text replacement
 *
 * Converts ASCII typewriter punctuation to typographic equivalents:
 * dashes, curly quotes, and ellipses. Substitution order matters
 * (e.g. em-dashes must be handled before en-dashes).
 */
function smartypants(text) {
  const substitutions = [
    // em-dashes
    [/---/g, '\u2014'],
    // en-dashes
    [/--/g, '\u2013'],
    // opening singles
    [/(^|[-\u2014/(\[{"\s])'/g, '$1\u2018'],
    // closing singles & apostrophes
    [/'/g, '\u2019'],
    // opening doubles
    [/(^|[-\u2014/(\[{\u2018\s])"/g, '$1\u201c'],
    // closing doubles
    [/"/g, '\u201d'],
    // ellipses
    [/\.{3}/g, '\u2026']
  ];
  return substitutions.reduce(
    (result, [pattern, replacement]) => result.replace(pattern, replacement),
    text
  );
}
|
|
|
-
|
|
|
/**
 * mangle email addresses
 *
 * Obfuscates every UTF-16 code unit of `text` as an HTML numeric
 * character reference; roughly half (chosen via Math.random) are
 * emitted in hexadecimal form to make scraping harder. Output is
 * therefore non-deterministic in form but always decodes to `text`.
 */
function mangle(text) {
  return Array.from({ length: text.length }, (_, i) => {
    const code = text.charCodeAt(i);
    const entity = Math.random() > 0.5 ? 'x' + code.toString(16) : code;
    return '&#' + entity + ';';
  }).join('');
}
|
|
|
-
|
|
|
/**
 * Block Lexer
 *
 * Turns a markdown source string into a token tree. Block-level tokens are
 * produced first (blockTokens), then inline children are attached in a
 * second pass (inline/inlineTokens). Each tokenizer.* call either returns a
 * token (whose `raw` is consumed from `src`) or a falsy value; the order of
 * the if-chains below defines rule precedence and must not be changed.
 */
var Lexer_1 = class Lexer {
  /**
   * @param {object} [options] marked options; falls back to the shared
   *   defaults. Honors pedantic/gfm/breaks to pick the grammar, and an
   *   optional custom `tokenizer`.
   */
  constructor(options) {
    this.tokens = [];
    // Link reference definitions keyed by label; Object.create(null)
    // avoids collisions with Object.prototype keys.
    this.tokens.links = Object.create(null);
    this.options = options || defaults$3;
    this.options.tokenizer = this.options.tokenizer || new Tokenizer$1();
    this.tokenizer = this.options.tokenizer;
    this.tokenizer.options = this.options;

    // Grammar selection: pedantic > gfm (+breaks variant) > normal.
    const rules = {
      block: block.normal,
      inline: inline.normal
    };

    if (this.options.pedantic) {
      rules.block = block.pedantic;
      rules.inline = inline.pedantic;
    } else if (this.options.gfm) {
      rules.block = block.gfm;
      if (this.options.breaks) {
        rules.inline = inline.breaks;
      } else {
        rules.inline = inline.gfm;
      }
    }
    this.tokenizer.rules = rules;
  }

  /**
   * Expose Rules
   */
  static get rules() {
    return {
      block,
      inline
    };
  }

  /**
   * Static Lex Method
   */
  static lex(src, options) {
    const lexer = new Lexer(options);
    return lexer.lex(src);
  }

  /**
   * Static Lex Inline Method
   */
  static lexInline(src, options) {
    const lexer = new Lexer(options);
    return lexer.inlineTokens(src);
  }

  /**
   * Preprocessing
   *
   * Normalizes line endings to \n and expands tabs to four spaces, then
   * runs the block pass followed by the inline pass.
   */
  lex(src) {
    src = src
      .replace(/\r\n|\r/g, '\n')
      .replace(/\t/g, '    ');

    this.blockTokens(src, this.tokens, true);

    this.inline(this.tokens);

    return this.tokens;
  }

  /**
   * Lexing
   *
   * Consumes `src` front-to-back, appending block tokens to `tokens`.
   * `top` is true at the document (and blockquote) level; defs and
   * paragraphs are only recognized there.
   */
  blockTokens(src, tokens = [], top = true) {
    if (this.options.pedantic) {
      src = src.replace(/^ +$/gm, '');
    }
    let token, i, l, lastToken;

    while (src) {
      // newline
      if (token = this.tokenizer.space(src)) {
        src = src.substring(token.raw.length);
        if (token.type) {
          tokens.push(token);
        }
        continue;
      }

      // code
      if (token = this.tokenizer.code(src)) {
        src = src.substring(token.raw.length);
        lastToken = tokens[tokens.length - 1];
        // An indented code block cannot interrupt a paragraph.
        if (lastToken && lastToken.type === 'paragraph') {
          lastToken.raw += '\n' + token.raw;
          lastToken.text += '\n' + token.text;
        } else {
          tokens.push(token);
        }
        continue;
      }

      // fences
      if (token = this.tokenizer.fences(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // heading
      if (token = this.tokenizer.heading(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // table no leading pipe (gfm)
      if (token = this.tokenizer.nptable(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // hr
      if (token = this.tokenizer.hr(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // blockquote — contents are re-lexed recursively at the same `top` level.
      if (token = this.tokenizer.blockquote(src)) {
        src = src.substring(token.raw.length);
        token.tokens = this.blockTokens(token.text, [], top);
        tokens.push(token);
        continue;
      }

      // list — each item body is re-lexed with top=false.
      if (token = this.tokenizer.list(src)) {
        src = src.substring(token.raw.length);
        l = token.items.length;
        for (i = 0; i < l; i++) {
          token.items[i].tokens = this.blockTokens(token.items[i].text, [], false);
        }
        tokens.push(token);
        continue;
      }

      // html
      if (token = this.tokenizer.html(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // def — recorded in this.tokens.links, not emitted as a token.
      // First definition for a label wins.
      if (top && (token = this.tokenizer.def(src))) {
        src = src.substring(token.raw.length);
        if (!this.tokens.links[token.tag]) {
          this.tokens.links[token.tag] = {
            href: token.href,
            title: token.title
          };
        }
        continue;
      }

      // table (gfm)
      if (token = this.tokenizer.table(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // lheading
      if (token = this.tokenizer.lheading(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // top-level paragraph
      if (top && (token = this.tokenizer.paragraph(src))) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // text — consecutive text tokens are merged.
      if (token = this.tokenizer.text(src)) {
        src = src.substring(token.raw.length);
        lastToken = tokens[tokens.length - 1];
        if (lastToken && lastToken.type === 'text') {
          lastToken.raw += '\n' + token.raw;
          lastToken.text += '\n' + token.text;
        } else {
          tokens.push(token);
        }
        continue;
      }

      // No rule consumed anything: report rather than spin forever.
      if (src) {
        const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
        if (this.options.silent) {
          console.error(errMsg);
          break;
        } else {
          throw new Error(errMsg);
        }
      }
    }

    return tokens;
  }

  /**
   * Second pass: walk the block-token tree and attach inline child tokens
   * to every token type that carries inline text.
   */
  inline(tokens) {
    let i,
      j,
      k,
      l2,
      row,
      token;

    const l = tokens.length;
    for (i = 0; i < l; i++) {
      token = tokens[i];
      switch (token.type) {
        case 'paragraph':
        case 'text':
        case 'heading': {
          token.tokens = [];
          this.inlineTokens(token.text, token.tokens);
          break;
        }
        case 'table': {
          token.tokens = {
            header: [],
            cells: []
          };

          // header
          l2 = token.header.length;
          for (j = 0; j < l2; j++) {
            token.tokens.header[j] = [];
            this.inlineTokens(token.header[j], token.tokens.header[j]);
          }

          // cells
          l2 = token.cells.length;
          for (j = 0; j < l2; j++) {
            row = token.cells[j];
            token.tokens.cells[j] = [];
            for (k = 0; k < row.length; k++) {
              token.tokens.cells[j][k] = [];
              this.inlineTokens(row[k], token.tokens.cells[j][k]);
            }
          }

          break;
        }
        case 'blockquote': {
          this.inline(token.tokens);
          break;
        }
        case 'list': {
          l2 = token.items.length;
          for (j = 0; j < l2; j++) {
            this.inline(token.items[j].tokens);
          }
          break;
        }
      }
    }

    return tokens;
  }

  /**
   * Lexing/Compiling
   *
   * Tokenizes inline markdown in `src`, appending to `tokens`.
   * `inLink` suppresses autolinking inside link text; `inRawBlock`
   * marks text inside <script>/<pre>/<style> style raw HTML.
   */
  inlineTokens(src, tokens = [], inLink = false, inRawBlock = false) {
    let token, lastToken;

    // String with links masked to avoid interference with em and strong
    let maskedSrc = src;
    let match;
    let keepPrevChar, prevChar;

    // Mask out reflinks — replace each known [label] with same-length filler
    // so emStrong delimiter scanning cannot see inside it.
    if (this.tokens.links) {
      const links = Object.keys(this.tokens.links);
      if (links.length > 0) {
        while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) {
          if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) {
            maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex);
          }
        }
      }
    }
    // Mask out other blocks
    while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) {
      maskedSrc = maskedSrc.slice(0, match.index) + '[' + repeatString('a', match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);
    }

    // Mask out escaped em & strong delimiters
    while ((match = this.tokenizer.rules.inline.escapedEmSt.exec(maskedSrc)) != null) {
      maskedSrc = maskedSrc.slice(0, match.index) + '++' + maskedSrc.slice(this.tokenizer.rules.inline.escapedEmSt.lastIndex);
    }

    while (src) {
      // prevChar tracks the character before the current position for the
      // emStrong delimiter rules; it only survives one iteration unless
      // the text rule asks to keep it.
      if (!keepPrevChar) {
        prevChar = '';
      }
      keepPrevChar = false;

      // escape
      if (token = this.tokenizer.escape(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // tag — may toggle inLink/inRawBlock state for subsequent tokens.
      if (token = this.tokenizer.tag(src, inLink, inRawBlock)) {
        src = src.substring(token.raw.length);
        inLink = token.inLink;
        inRawBlock = token.inRawBlock;
        const lastToken = tokens[tokens.length - 1];
        if (lastToken && token.type === 'text' && lastToken.type === 'text') {
          lastToken.raw += token.raw;
          lastToken.text += token.text;
        } else {
          tokens.push(token);
        }
        continue;
      }

      // link — link text is tokenized with inLink=true.
      if (token = this.tokenizer.link(src)) {
        src = src.substring(token.raw.length);
        if (token.type === 'link') {
          token.tokens = this.inlineTokens(token.text, [], true, inRawBlock);
        }
        tokens.push(token);
        continue;
      }

      // reflink, nolink
      if (token = this.tokenizer.reflink(src, this.tokens.links)) {
        src = src.substring(token.raw.length);
        const lastToken = tokens[tokens.length - 1];
        if (token.type === 'link') {
          token.tokens = this.inlineTokens(token.text, [], true, inRawBlock);
          tokens.push(token);
        } else if (lastToken && token.type === 'text' && lastToken.type === 'text') {
          lastToken.raw += token.raw;
          lastToken.text += token.text;
        } else {
          tokens.push(token);
        }
        continue;
      }

      // em & strong
      if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) {
        src = src.substring(token.raw.length);
        token.tokens = this.inlineTokens(token.text, [], inLink, inRawBlock);
        tokens.push(token);
        continue;
      }

      // code
      if (token = this.tokenizer.codespan(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // br
      if (token = this.tokenizer.br(src)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // del (gfm)
      if (token = this.tokenizer.del(src)) {
        src = src.substring(token.raw.length);
        token.tokens = this.inlineTokens(token.text, [], inLink, inRawBlock);
        tokens.push(token);
        continue;
      }

      // autolink
      if (token = this.tokenizer.autolink(src, mangle)) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // url (gfm) — bare URLs are not linkified inside link text.
      if (!inLink && (token = this.tokenizer.url(src, mangle))) {
        src = src.substring(token.raw.length);
        tokens.push(token);
        continue;
      }

      // text
      if (token = this.tokenizer.inlineText(src, inRawBlock, smartypants)) {
        src = src.substring(token.raw.length);
        if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started
          prevChar = token.raw.slice(-1);
        }
        keepPrevChar = true;
        lastToken = tokens[tokens.length - 1];
        if (lastToken && lastToken.type === 'text') {
          lastToken.raw += token.raw;
          lastToken.text += token.text;
        } else {
          tokens.push(token);
        }
        continue;
      }

      // No rule consumed anything: report rather than spin forever.
      if (src) {
        const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
        if (this.options.silent) {
          console.error(errMsg);
          break;
        } else {
          throw new Error(errMsg);
        }
      }
    }

    return tokens;
  }
};
|
|
|
-
|
|
|
-const { defaults: defaults$2 } = defaults$5.exports;
|
|
|
-const {
|
|
|
- cleanUrl,
|
|
|
- escape: escape$1
|
|
|
-} = helpers;
|
|
|
-
|
|
|
/**
 * Renderer
 *
 * Converts parsed tokens to HTML strings. Each method receives
 * already-tokenized content and returns a fragment of markup.
 */
var Renderer_1 = class Renderer {
  constructor(options) {
    this.options = options || defaults$2;
  }

  /**
   * Render a fenced/indented code block, optionally syntax-highlighted
   * via options.highlight. `escaped` indicates `code` is already
   * HTML-escaped.
   */
  code(code, infostring, escaped) {
    const lang = (infostring || '').match(/\S*/)[0];
    if (this.options.highlight) {
      const highlighted = this.options.highlight(code, lang);
      if (highlighted != null && highlighted !== code) {
        escaped = true;
        code = highlighted;
      }
    }

    // Normalize to exactly one trailing newline.
    code = code.replace(/\n$/, '') + '\n';

    const body = escaped ? code : escape$1(code, true);
    if (!lang) {
      return `<pre><code>${body}</code></pre>\n`;
    }

    return `<pre><code class="${this.options.langPrefix}${escape$1(lang, true)}">${body}</code></pre>\n`;
  }

  blockquote(quote) {
    return `<blockquote>\n${quote}</blockquote>\n`;
  }

  // Raw HTML passes through untouched.
  html(html) {
    return html;
  }

  /**
   * Render a heading; when headerIds is enabled an id attribute is
   * generated from the raw text through the slugger.
   */
  heading(text, level, raw, slugger) {
    if (!this.options.headerIds) {
      // ignore IDs
      return `<h${level}>${text}</h${level}>\n`;
    }
    const id = this.options.headerPrefix + slugger.slug(raw);
    return `<h${level} id="${id}">${text}</h${level}>\n`;
  }

  hr() {
    return this.options.xhtml ? '<hr/>\n' : '<hr>\n';
  }

  /**
   * Render a list; ordered lists starting at a value other than 1 get an
   * explicit start attribute.
   */
  list(body, ordered, start) {
    const tag = ordered ? 'ol' : 'ul';
    const startAttr = ordered && start !== 1 ? ` start="${start}"` : '';
    return `<${tag}${startAttr}>\n${body}</${tag}>\n`;
  }

  listitem(text) {
    return `<li>${text}</li>\n`;
  }

  // Task-list checkbox; always rendered disabled.
  checkbox(checked) {
    const checkedAttr = checked ? 'checked="" ' : '';
    const closer = this.options.xhtml ? ' /' : '';
    return `<input ${checkedAttr}disabled="" type="checkbox"${closer}> `;
  }

  paragraph(text) {
    return `<p>${text}</p>\n`;
  }

  table(header, body) {
    if (body) body = `<tbody>${body}</tbody>`;

    return `<table>\n<thead>\n${header}</thead>\n${body}</table>\n`;
  }

  tablerow(content) {
    return `<tr>\n${content}</tr>\n`;
  }

  // flags: { header: bool, align: 'left'|'center'|'right'|null }
  tablecell(content, flags) {
    const type = flags.header ? 'th' : 'td';
    const openTag = flags.align
      ? `<${type} align="${flags.align}">`
      : `<${type}>`;
    return `${openTag}${content}</${type}>\n`;
  }

  // span level renderer
  strong(text) {
    return `<strong>${text}</strong>`;
  }

  em(text) {
    return `<em>${text}</em>`;
  }

  codespan(text) {
    return `<code>${text}</code>`;
  }

  br() {
    return this.options.xhtml ? '<br/>' : '<br>';
  }

  del(text) {
    return `<del>${text}</del>`;
  }

  /**
   * Render a link; falls back to plain text when cleanUrl rejects the
   * href (sanitize mode / malformed URL).
   */
  link(href, title, text) {
    const cleanHref = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
    if (cleanHref === null) {
      return text;
    }
    let out = `<a href="${escape$1(cleanHref)}"`;
    if (title) {
      out += ` title="${title}"`;
    }
    out += `>${text}</a>`;
    return out;
  }

  /**
   * Render an image; falls back to the alt text when cleanUrl rejects
   * the href.
   */
  image(href, title, text) {
    const cleanHref = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
    if (cleanHref === null) {
      return text;
    }

    let out = `<img src="${cleanHref}" alt="${text}"`;
    if (title) {
      out += ` title="${title}"`;
    }
    out += this.options.xhtml ? '/>' : '>';
    return out;
  }

  text(text) {
    return text;
  }
};
|
|
|
-
|
|
|
/**
 * TextRenderer
 * returns only the textual part of the token
 *
 * Used when a plain-text rendering of inline tokens is needed (e.g. for
 * heading slugs); all markup-producing methods degrade to their content.
 */
var TextRenderer_1 = class TextRenderer {
  // no need for block level renderers

  strong(content) { return content; }

  em(content) { return content; }

  codespan(content) { return content; }

  del(content) { return content; }

  html(content) { return content; }

  text(content) { return content; }

  // Links and images collapse to their text/alt content.
  link(href, title, content) {
    return '' + content;
  }

  image(href, title, content) {
    return '' + content;
  }

  // Line breaks vanish in plain text.
  br() {
    return '';
  }
};
|
|
|
-
|
|
|
/**
 * Slugger generates header id
 *
 * Produces URL-friendly, unique ids from heading text. Uniqueness is
 * tracked per instance in `seen`.
 */
var Slugger_1 = class Slugger {
  constructor() {
    // Maps each issued slug to the number of times its base form has
    // been disambiguated so far.
    this.seen = {};
  }

  /**
   * Normalize raw heading text into slug form: lowercase, html tags and
   * punctuation stripped, whitespace replaced with hyphens.
   */
  serialize(value) {
    let slug = value.toLowerCase().trim();
    // remove html tags
    slug = slug.replace(/<[!\/a-z].*?>/ig, '');
    // remove unwanted chars
    slug = slug.replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g, '');
    return slug.replace(/\s/g, '-');
  }

  /**
   * Finds the next safe (unique) slug to use
   *
   * Appends "-N" with the smallest N that has not been issued yet.
   * When isDryRun is truthy the bookkeeping in `seen` is left untouched.
   */
  getNextSafeSlug(originalSlug, isDryRun) {
    let slug = originalSlug;
    let occurrences = 0;
    if (Object.prototype.hasOwnProperty.call(this.seen, slug)) {
      occurrences = this.seen[originalSlug];
      do {
        occurrences++;
        slug = `${originalSlug}-${occurrences}`;
      } while (Object.prototype.hasOwnProperty.call(this.seen, slug));
    }
    if (!isDryRun) {
      this.seen[originalSlug] = occurrences;
      this.seen[slug] = 0;
    }
    return slug;
  }

  /**
   * Convert string to unique id
   * @param {object} options
   * @param {boolean} options.dryrun Generates the next unique slug without updating the internal accumulator.
   */
  slug(value, options = {}) {
    return this.getNextSafeSlug(this.serialize(value), options.dryrun);
  }
};
|
|
|
-
|
|
|
-const Renderer$1 = Renderer_1;
|
|
|
-const TextRenderer$1 = TextRenderer_1;
|
|
|
-const Slugger$1 = Slugger_1;
|
|
|
-const { defaults: defaults$1 } = defaults$5.exports;
|
|
|
-const {
|
|
|
- unescape
|
|
|
-} = helpers;
|
|
|
-
|
|
|
/**
 * Parsing & Compiling
 *
 * Walks a block-level token stream (produced by the Lexer) and emits the
 * final output string through the configured Renderer.
 */
var Parser_1 = class Parser {
  // NOTE(review): `options` is mutated here (a default renderer is assigned
  // onto it when missing), so callers sharing one options object see that
  // change — confirm this is intended before reusing options across parsers.
  constructor(options) {
    this.options = options || defaults$1;
    this.options.renderer = this.options.renderer || new Renderer$1();
    this.renderer = this.options.renderer;
    this.renderer.options = this.options;
    // Plain-text renderer used only to build heading slugs/ids.
    this.textRenderer = new TextRenderer$1();
    this.slugger = new Slugger$1();
  }

  /**
   * Static Parse Method
   * Convenience wrapper: builds a Parser and parses block-level tokens.
   */
  static parse(tokens, options) {
    const parser = new Parser(options);
    return parser.parse(tokens);
  }

  /**
   * Static Parse Inline Method
   * Convenience wrapper: builds a Parser and parses inline tokens.
   */
  static parseInline(tokens, options) {
    const parser = new Parser(options);
    return parser.parseInline(tokens);
  }

  /**
   * Parse Loop
   *
   * @param {Array} tokens Block-level token stream.
   * @param {boolean} [top=true] True at the document's top level; list item
   *   bodies recurse with `top = loose` so bare text tokens only become
   *   paragraphs when the list is loose.
   * @returns {string} Rendered output. NOTE(review): on an unknown token
   *   type with `options.silent` set this logs and returns `undefined`,
   *   not a string.
   */
  parse(tokens, top = true) {
    let out = '',
      i,
      j,
      k,
      l2,
      l3,
      row,
      cell,
      header,
      body,
      token,
      ordered,
      start,
      loose,
      itemBody,
      item,
      checked,
      task,
      checkbox;

    const l = tokens.length;
    for (i = 0; i < l; i++) {
      token = tokens[i];
      switch (token.type) {
        case 'space': {
          // Blank-line tokens produce no output.
          continue;
        }
        case 'hr': {
          out += this.renderer.hr();
          continue;
        }
        case 'heading': {
          out += this.renderer.heading(
            this.parseInline(token.tokens),
            token.depth,
            // Slug source: the heading rendered as plain text, unescaped.
            unescape(this.parseInline(token.tokens, this.textRenderer)),
            this.slugger);
          continue;
        }
        case 'code': {
          out += this.renderer.code(token.text,
            token.lang,
            token.escaped);
          continue;
        }
        case 'table': {
          header = '';

          // header
          cell = '';
          l2 = token.header.length;
          for (j = 0; j < l2; j++) {
            cell += this.renderer.tablecell(
              this.parseInline(token.tokens.header[j]),
              { header: true, align: token.align[j] }
            );
          }
          header += this.renderer.tablerow(cell);

          // body — raw cell text lives on token.cells while the matching
          // inline tokens live on token.tokens.cells (parallel arrays).
          body = '';
          l2 = token.cells.length;
          for (j = 0; j < l2; j++) {
            row = token.tokens.cells[j];

            cell = '';
            l3 = row.length;
            for (k = 0; k < l3; k++) {
              cell += this.renderer.tablecell(
                this.parseInline(row[k]),
                { header: false, align: token.align[k] }
              );
            }

            body += this.renderer.tablerow(cell);
          }
          out += this.renderer.table(header, body);
          continue;
        }
        case 'blockquote': {
          // Blockquote bodies are block-level: recurse through parse().
          body = this.parse(token.tokens);
          out += this.renderer.blockquote(body);
          continue;
        }
        case 'list': {
          ordered = token.ordered;
          start = token.start;
          loose = token.loose;
          l2 = token.items.length;

          body = '';
          for (j = 0; j < l2; j++) {
            item = token.items[j];
            checked = item.checked;
            task = item.task;

            itemBody = '';
            if (item.task) {
              checkbox = this.renderer.checkbox(checked);
              if (loose) {
                // Loose task item: splice the checkbox into the leading
                // text token (and its first inline child) so it renders
                // inside the item's paragraph.
                if (item.tokens.length > 0 && item.tokens[0].type === 'text') {
                  item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
                  if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
                    item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
                  }
                } else {
                  item.tokens.unshift({
                    type: 'text',
                    text: checkbox
                  });
                }
              } else {
                // Tight task item: checkbox goes directly before the body.
                itemBody += checkbox;
              }
            }

            // `top = loose`: bare text only becomes <p> in loose lists.
            itemBody += this.parse(item.tokens, loose);
            body += this.renderer.listitem(itemBody, task, checked);
          }

          out += this.renderer.list(body, ordered, start);
          continue;
        }
        case 'html': {
          // TODO parse inline content if parameter markdown=1
          out += this.renderer.html(token.text);
          continue;
        }
        case 'paragraph': {
          out += this.renderer.paragraph(this.parseInline(token.tokens));
          continue;
        }
        case 'text': {
          // Merge runs of consecutive text tokens into one body chunk,
          // joined by newlines, before deciding whether to wrap in <p>.
          body = token.tokens ? this.parseInline(token.tokens) : token.text;
          while (i + 1 < l && tokens[i + 1].type === 'text') {
            token = tokens[++i];
            body += '\n' + (token.tokens ? this.parseInline(token.tokens) : token.text);
          }
          out += top ? this.renderer.paragraph(body) : body;
          continue;
        }
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            console.error(errMsg);
            return;
          } else {
            throw new Error(errMsg);
          }
        }
      }
    }

    return out;
  }

  /**
   * Parse Inline Tokens
   *
   * @param {Array} tokens Inline token stream.
   * @param {object} [renderer] Alternate renderer (e.g. the text renderer
   *   used for heading slugs); defaults to the configured HTML renderer.
   * @returns {string} Rendered inline output (see the silent-mode caveat
   *   on parse(): unknown token types log and return `undefined`).
   */
  parseInline(tokens, renderer) {
    renderer = renderer || this.renderer;
    let out = '',
      i,
      token;

    const l = tokens.length;
    for (i = 0; i < l; i++) {
      token = tokens[i];
      switch (token.type) {
        case 'escape': {
          out += renderer.text(token.text);
          break;
        }
        case 'html': {
          out += renderer.html(token.text);
          break;
        }
        case 'link': {
          out += renderer.link(token.href, token.title, this.parseInline(token.tokens, renderer));
          break;
        }
        case 'image': {
          out += renderer.image(token.href, token.title, token.text);
          break;
        }
        case 'strong': {
          out += renderer.strong(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'em': {
          out += renderer.em(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'codespan': {
          out += renderer.codespan(token.text);
          break;
        }
        case 'br': {
          out += renderer.br();
          break;
        }
        case 'del': {
          out += renderer.del(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'text': {
          out += renderer.text(token.text);
          break;
        }
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            console.error(errMsg);
            return;
          } else {
            throw new Error(errMsg);
          }
        }
      }
    }
    return out;
  }
};
|
|
|
-
|
|
|
-const Lexer = Lexer_1;
|
|
|
-const Parser = Parser_1;
|
|
|
-const Tokenizer = Tokenizer_1;
|
|
|
-const Renderer = Renderer_1;
|
|
|
-const TextRenderer = TextRenderer_1;
|
|
|
-const Slugger = Slugger_1;
|
|
|
-const {
|
|
|
- merge,
|
|
|
- checkSanitizeDeprecation,
|
|
|
- escape
|
|
|
-} = helpers;
|
|
|
-const {
|
|
|
- getDefaults,
|
|
|
- changeDefaults,
|
|
|
- defaults
|
|
|
-} = defaults$5.exports;
|
|
|
-
|
|
|
/**
 * Marked
 *
 * Compile markdown `src` to HTML. With a `callback` it works
 * asynchronously (required when an async `highlight` option is supplied);
 * otherwise it lexes and parses synchronously and returns the HTML string.
 *
 * @param {string} src Markdown source (anything else throws).
 * @param {object|function} [opt] Options, or the callback itself.
 * @param {function} [callback] Node-style callback `(err, html)`.
 */
function marked(src, opt, callback) {
  // throw error in case of non string input
  if (typeof src === 'undefined' || src === null) {
    throw new Error('marked(): input parameter is undefined or null');
  }
  if (typeof src !== 'string') {
    throw new Error('marked(): input parameter is of type '
      + Object.prototype.toString.call(src) + ', string expected');
  }

  // Support the marked(src, callback) shorthand.
  if (typeof opt === 'function') {
    callback = opt;
    opt = null;
  }

  opt = merge({}, marked.defaults, opt || {});
  checkSanitizeDeprecation(opt);

  if (callback) {
    const highlight = opt.highlight;
    let tokens;

    try {
      tokens = Lexer.lex(src, opt);
    } catch (e) {
      return callback(e);
    }

    // Runs the parse step (after any async highlighting has finished) and
    // hands the result — or the first error — to the user callback.
    const done = function(err) {
      let out;

      if (!err) {
        try {
          if (opt.walkTokens) {
            marked.walkTokens(tokens, opt.walkTokens);
          }
          out = Parser.parse(tokens, opt);
        } catch (e) {
          err = e;
        }
      }

      // Restore the highlight option that was removed before parsing.
      opt.highlight = highlight;

      return err
        ? callback(err)
        : callback(null, out);
    };

    // A highlight function taking fewer than 3 parameters is synchronous —
    // the renderer can call it inline, so parse immediately.
    if (!highlight || highlight.length < 3) {
      return done();
    }

    // Prevent the renderer from re-invoking the async highlighter.
    delete opt.highlight;

    if (!tokens.length) return done();

    // Asynchronously highlight every code token; parse once the pending
    // counter drains back to zero.
    let pending = 0;
    marked.walkTokens(tokens, function(token) {
      if (token.type === 'code') {
        pending++;
        setTimeout(() => {
          highlight(token.text, token.lang, function(err, code) {
            if (err) {
              return done(err);
            }
            // Only replace the text when the highlighter actually changed it.
            if (code != null && code !== token.text) {
              token.text = code;
              token.escaped = true;
            }

            pending--;
            if (pending === 0) {
              done();
            }
          });
        }, 0);
      }
    });

    // No code tokens at all: nothing to highlight, parse right away.
    if (pending === 0) {
      done();
    }

    return;
  }

  // Synchronous path: lex, optionally walk, parse.
  try {
    const tokens = Lexer.lex(src, opt);
    if (opt.walkTokens) {
      marked.walkTokens(tokens, opt.walkTokens);
    }
    return Parser.parse(tokens, opt);
  } catch (e) {
    e.message += '\nPlease report this to https://github.com/markedjs/marked.';
    if (opt.silent) {
      // Silent mode: surface the failure as HTML instead of throwing.
      return '<p>An error occurred:</p><pre>'
        + escape(e.message + '', true)
        + '</pre>';
    }
    throw e;
  }
}
|
|
|
-
|
|
|
/**
 * Options
 */

// Merge `opt` into the shared defaults and push the result to the defaults
// module so newly created Lexer/Parser instances pick it up. Returns
// `marked` for chaining. `marked.options` is an alias of `marked.setOptions`.
marked.options =
marked.setOptions = function(opt) {
  merge(marked.defaults, opt);
  changeDefaults(marked.defaults);
  return marked;
};

// Factory for a fresh, unmodified options object.
marked.getDefaults = getDefaults;

// The live (mutable) defaults object shared by all marked entry points.
marked.defaults = defaults;
|
|
|
-
|
|
|
/**
 * Use Extension
 *
 * Install an extension object: renderer/tokenizer method overrides (an
 * override may return `false` to defer to the previous implementation)
 * plus an extra walkTokens visitor. All other keys are applied as plain
 * options via setOptions().
 */
marked.use = function(extension) {
  const opts = merge({}, extension);

  // Wrap each overridden member so that a `false` return value falls
  // through to the implementation that was in place before this extension.
  const applyOverrides = (target, overrides) => {
    for (const prop in overrides) {
      const prevFn = target[prop];
      target[prop] = (...args) => {
        let ret = overrides[prop].apply(target, args);
        if (ret === false) {
          ret = prevFn.apply(target, args);
        }
        return ret;
      };
    }
    return target;
  };

  if (extension.renderer) {
    opts.renderer = applyOverrides(marked.defaults.renderer || new Renderer(), extension.renderer);
  }

  if (extension.tokenizer) {
    opts.tokenizer = applyOverrides(marked.defaults.tokenizer || new Tokenizer(), extension.tokenizer);
  }

  if (extension.walkTokens) {
    // Chain the new visitor in front of any previously installed one.
    const previousWalk = marked.defaults.walkTokens;
    opts.walkTokens = (token) => {
      extension.walkTokens(token);
      if (previousWalk) {
        previousWalk(token);
      }
    };
  }

  marked.setOptions(opts);
};
|
|
|
-
|
|
|
/**
 * Run callback for every token
 *
 * Depth-first traversal: `callback` sees each token before its children
 * (table header/body cells, list items, or nested `token.tokens`).
 */
marked.walkTokens = function(tokens, callback) {
  for (const token of tokens) {
    callback(token);
    if (token.type === 'table') {
      for (const headerCell of token.tokens.header) {
        marked.walkTokens(headerCell, callback);
      }
      for (const bodyRow of token.tokens.cells) {
        for (const bodyCell of bodyRow) {
          marked.walkTokens(bodyCell, callback);
        }
      }
    } else if (token.type === 'list') {
      marked.walkTokens(token.items, callback);
    } else if (token.tokens) {
      marked.walkTokens(token.tokens, callback);
    }
  }
};
|
|
|
-
|
|
|
/**
 * Parse Inline
 *
 * Compile a markdown string as inline content only (no block structure).
 */
marked.parseInline = function(src, opt) {
  // Guard: only strings are accepted.
  if (src === null || typeof src === 'undefined') {
    throw new Error('marked.parseInline(): input parameter is undefined or null');
  }
  if (typeof src !== 'string') {
    throw new Error(`marked.parseInline(): input parameter is of type ${Object.prototype.toString.call(src)}, string expected`);
  }

  opt = merge({}, marked.defaults, opt || {});
  checkSanitizeDeprecation(opt);

  try {
    const tokens = Lexer.lexInline(src, opt);
    if (opt.walkTokens) {
      marked.walkTokens(tokens, opt.walkTokens);
    }
    return Parser.parseInline(tokens, opt);
  } catch (e) {
    e.message += '\nPlease report this to https://github.com/markedjs/marked.';
    if (!opt.silent) {
      throw e;
    }
    // Silent mode: surface the failure as HTML instead of throwing.
    return `<p>An error occurred:</p><pre>${escape(e.message + '', true)}</pre>`;
  }
};
|
|
|
-
|
|
|
/**
 * Expose
 */

// Attach the internal classes and their static entry points to the
// `marked` function so consumers can access or subclass them.
marked.Parser = Parser;
marked.parser = Parser.parse;

marked.Renderer = Renderer;
marked.TextRenderer = TextRenderer;

marked.Lexer = Lexer;
marked.lexer = Lexer.lex;

marked.Tokenizer = Tokenizer;

marked.Slugger = Slugger;

// `marked.parse(src)` is an alias for calling `marked` directly.
marked.parse = marked;

var marked_1 = marked;

// Default export of the bundled library.
export default marked_1;
|