/**
 * marked v4.3.0 - a markdown parser
 * Copyright (c) 2011-2023, Christopher Jeffrey. (MIT Licensed)
 * https://github.com/markedjs/marked
 */
/**
 * DO NOT EDIT THIS FILE
 * The code in this file is generated from files in ./src/
 */
function getDefaults ( ) {
return {
async : false ,
baseUrl : null ,
breaks : false ,
extensions : null ,
gfm : true ,
headerIds : true ,
headerPrefix : '' ,
highlight : null ,
hooks : null ,
langPrefix : 'language-' ,
mangle : true ,
pedantic : false ,
renderer : null ,
sanitize : false ,
sanitizer : null ,
silent : false ,
smartypants : false ,
tokenizer : null ,
walkTokens : null ,
xhtml : false
} ;
}
let defaults = getDefaults ( ) ;
function changeDefaults ( newDefaults ) {
defaults = newDefaults ;
}
/**
 * Helpers
 */
const escapeTest = /[&<>"']/ ;
const escapeReplace = new RegExp ( escapeTest . source , 'g' ) ;
const escapeTestNoEncode = /[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/ ;
const escapeReplaceNoEncode = new RegExp ( escapeTestNoEncode . source , 'g' ) ;
const escapeReplacements = {
  '&': '&amp;',
  '<': '&lt;',
  '>': '&gt;',
  '"': '&quot;',
  "'": '&#39;'
};
const getEscapeReplacement = ( ch ) => escapeReplacements [ ch ] ;
function escape ( html , encode ) {
if ( encode ) {
if ( escapeTest . test ( html ) ) {
return html . replace ( escapeReplace , getEscapeReplacement ) ;
}
} else {
if ( escapeTestNoEncode . test ( html ) ) {
return html . replace ( escapeReplaceNoEncode , getEscapeReplacement ) ;
}
}
return html ;
}
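/*
 * Illustrative sketch (example inputs assumed, not part of the generated bundle):
 * with `encode` falsy, `&` is only escaped when it does not already start an entity;
 * with `encode` truthy, every special character is escaped.
 *
 *   escape('5 > 3 & x')      // => '5 &gt; 3 &amp; x'
 *   escape('&amp;')          // => '&amp;'      (existing entity left alone)
 *   escape('&amp;', true)    // => '&amp;amp;'  (double-escaped when encode is true)
 */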
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig ;
/**
 * @param {string} html
 */
function unescape ( html ) {
// explicitly match decimal, hex, and named HTML entities
return html . replace ( unescapeTest , ( _ , n ) => {
n = n . toLowerCase ( ) ;
if ( n === 'colon' ) return ':' ;
if ( n . charAt ( 0 ) === '#' ) {
return n . charAt ( 1 ) === 'x'
? String . fromCharCode ( parseInt ( n . substring ( 2 ) , 16 ) )
: String . fromCharCode ( + n . substring ( 1 ) ) ;
}
return '' ;
} ) ;
}
const caret = /(^|[^\[])\^/g ;
/**
 * @param {string | RegExp} regex
 * @param {string} opt
 */
function edit ( regex , opt ) {
regex = typeof regex === 'string' ? regex : regex . source ;
opt = opt || '' ;
const obj = {
replace : ( name , val ) => {
val = val . source || val ;
val = val . replace ( caret , '$1' ) ;
regex = regex . replace ( name , val ) ;
return obj ;
} ,
getRegex : ( ) => {
return new RegExp ( regex , opt ) ;
}
} ;
return obj ;
}
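/*
 * Illustrative sketch (example pattern assumed): `edit` treats a regex source as a
 * template, splices named sub-patterns in via `replace`, and strips a leading caret
 * from the spliced value so anchors do not end up mid-pattern.
 *
 *   edit(/^(label)\s/, 'i')
 *     .replace('label', /^[a-z]+/)   // leading '^' of the spliced pattern is dropped
 *     .getRegex()                    // => /^([a-z]+)\s/i
 */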
const nonWordAndColonTest = /[^\w:]/g ;
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i ;
/**
 * @param {boolean} sanitize
 * @param {string} base
 * @param {string} href
 */
function cleanUrl ( sanitize , base , href ) {
if ( sanitize ) {
let prot ;
try {
prot = decodeURIComponent ( unescape ( href ) )
. replace ( nonWordAndColonTest , '' )
. toLowerCase ( ) ;
} catch ( e ) {
return null ;
}
if ( prot . indexOf ( 'javascript:' ) === 0 || prot . indexOf ( 'vbscript:' ) === 0 || prot . indexOf ( 'data:' ) === 0 ) {
return null ;
}
}
if ( base && ! originIndependentUrl . test ( href ) ) {
href = resolveUrl ( base , href ) ;
}
try {
href = encodeURI ( href ) . replace ( /%25/g , '%' ) ;
} catch ( e ) {
return null ;
}
return href ;
}
const baseUrls = { } ;
const justDomain = /^[^:]+:\/*[^/]*$/ ;
const protocol = /^([^:]+:)[\s\S]*$/ ;
const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/ ;
/**
 * @param {string} base
 * @param {string} href
 */
function resolveUrl ( base , href ) {
if ( ! baseUrls [ ' ' + base ] ) {
// we can ignore everything in base after the last slash of its path component,
// but we might need to add _that_
// https://tools.ietf.org/html/rfc3986#section-3
if ( justDomain . test ( base ) ) {
baseUrls [ ' ' + base ] = base + '/' ;
} else {
baseUrls [ ' ' + base ] = rtrim ( base , '/' , true ) ;
}
}
base = baseUrls [ ' ' + base ] ;
const relativeBase = base . indexOf ( ':' ) === - 1 ;
if ( href . substring ( 0 , 2 ) === '//' ) {
if ( relativeBase ) {
return href ;
}
return base . replace ( protocol , '$1' ) + href ;
} else if ( href . charAt ( 0 ) === '/' ) {
if ( relativeBase ) {
return href ;
}
return base . replace ( domain , '$1' ) + href ;
} else {
return base + href ;
}
}
const noopTest = { exec : function noopTest ( ) { } } ;
function splitCells ( tableRow , count ) {
// ensure that every cell-delimiting pipe has a space
// before it to distinguish it from an escaped pipe
const row = tableRow . replace ( /\|/g , ( match , offset , str ) => {
let escaped = false ,
curr = offset ;
while ( -- curr >= 0 && str [ curr ] === '\\' ) escaped = ! escaped ;
if ( escaped ) {
// odd number of slashes means | is escaped
// so we leave it alone
return '|' ;
} else {
// add space before unescaped |
return ' |' ;
}
} ) ,
cells = row . split ( / \|/ ) ;
let i = 0 ;
// First/last cell in a row cannot be empty if it has no leading/trailing pipe
if ( ! cells [ 0 ] . trim ( ) ) { cells . shift ( ) ; }
if ( cells . length > 0 && ! cells [ cells . length - 1 ] . trim ( ) ) { cells . pop ( ) ; }
if ( cells . length > count ) {
cells . splice ( count ) ;
} else {
while ( cells . length < count ) cells . push ( '' ) ;
}
for ( ; i < cells . length ; i ++ ) {
// leading or trailing whitespace is ignored per the gfm spec
cells [ i ] = cells [ i ] . trim ( ) . replace ( /\\\|/g , '|' ) ;
}
return cells ;
}
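/*
 * Illustrative sketch (example rows assumed): escaped pipes stay inside a cell, and
 * the result is padded or truncated to `count` cells.
 *
 *   splitCells('| a | b \\| c |', 2)   // => ['a', 'b | c']
 *   splitCells('x | y', 3)             // => ['x', 'y', '']
 */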
/**
 * Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').
 * /c*$/ is vulnerable to ReDoS.
 *
 * @param {string} str
 * @param {string} c
 * @param {boolean} invert Remove suffix of non-c chars instead. Default falsey.
 */
function rtrim ( str , c , invert ) {
const l = str . length ;
if ( l === 0 ) {
return '' ;
}
// Length of suffix matching the invert condition.
let suffLen = 0 ;
// Step left until we fail to match the invert condition.
while ( suffLen < l ) {
const currChar = str . charAt ( l - suffLen - 1 ) ;
if ( currChar === c && ! invert ) {
suffLen ++ ;
} else if ( currChar !== c && invert ) {
suffLen ++ ;
} else {
break ;
}
}
return str . slice ( 0 , l - suffLen ) ;
}
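/*
 * Illustrative sketch (example strings assumed):
 *
 *   rtrim('foo###', '#')          // => 'foo'
 *   rtrim('foo#bar', '#', true)   // => 'foo#'    (invert strips trailing non-'#' chars)
 *   rtrim('foo###', '#', true)    // => 'foo###'
 */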
function findClosingBracket ( str , b ) {
if ( str . indexOf ( b [ 1 ] ) === - 1 ) {
return - 1 ;
}
const l = str . length ;
let level = 0 ,
i = 0 ;
for ( ; i < l ; i ++ ) {
if ( str [ i ] === '\\' ) {
i ++ ;
} else if ( str [ i ] === b [ 0 ] ) {
level ++ ;
} else if ( str [ i ] === b [ 1 ] ) {
level -- ;
if ( level < 0 ) {
return i ;
}
}
}
return - 1 ;
}
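/*
 * Illustrative sketch (example strings assumed): returns the index of the bracket that
 * closes an implicit opening bracket before the string, or -1 if it never closes.
 *
 *   findClosingBracket('a(b)c) tail', '()')   // => 5
 *   findClosingBracket('never closed', '()')  // => -1
 */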
function checkSanitizeDeprecation ( opt ) {
if ( opt && opt . sanitize && ! opt . silent ) {
console . warn ( 'marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options' ) ;
}
}
// copied from https://stackoverflow.com/a/5450113/806777
/**
 * @param {string} pattern
 * @param {number} count
 */
function repeatString ( pattern , count ) {
if ( count < 1 ) {
return '' ;
}
let result = '' ;
while ( count > 1 ) {
if ( count & 1 ) {
result += pattern ;
}
count >>= 1 ;
pattern += pattern ;
}
return result + pattern ;
}
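/*
 * Illustrative sketch (example values assumed): builds the repetition by doubling the
 * pattern, so only O(log count) concatenations are needed.
 *
 *   repeatString('ab', 3)   // => 'ababab'
 *   repeatString('ab', 0)   // => ''
 */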
function outputLink ( cap , link , raw , lexer ) {
const href = link . href ;
const title = link . title ? escape ( link . title ) : null ;
const text = cap [ 1 ] . replace ( /\\([\[\]])/g , '$1' ) ;
if ( cap [ 0 ] . charAt ( 0 ) !== '!' ) {
lexer . state . inLink = true ;
const token = {
type : 'link' ,
raw ,
href ,
title ,
text ,
tokens : lexer . inlineTokens ( text )
} ;
lexer . state . inLink = false ;
return token ;
}
return {
type : 'image' ,
raw ,
href ,
title ,
text : escape ( text )
} ;
}
function indentCodeCompensation ( raw , text ) {
const matchIndentToCode = raw . match ( /^(\s+)(?:```)/ ) ;
if ( matchIndentToCode === null ) {
return text ;
}
const indentToCode = matchIndentToCode [ 1 ] ;
return text
. split ( '\n' )
. map ( node => {
const matchIndentInNode = node . match ( /^\s+/ ) ;
if ( matchIndentInNode === null ) {
return node ;
}
const [ indentInNode ] = matchIndentInNode ;
if ( indentInNode . length >= indentToCode . length ) {
return node . slice ( indentToCode . length ) ;
}
return node ;
} )
. join ( '\n' ) ;
}
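/*
 * Illustrative sketch (example fence assumed): when a fenced block is itself indented,
 * the fence's indent is stripped from each sufficiently indented code line.
 *
 *   indentCodeCompensation('  ```js\n    const x = 1\n  ```', '    const x = 1')
 *   // => '  const x = 1'
 */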
/**
 * Tokenizer
 */
class Tokenizer {
constructor ( options ) {
this . options = options || defaults ;
}
space ( src ) {
const cap = this . rules . block . newline . exec ( src ) ;
if ( cap && cap [ 0 ] . length > 0 ) {
return {
type : 'space' ,
raw : cap [ 0 ]
} ;
}
}
code ( src ) {
const cap = this . rules . block . code . exec ( src ) ;
if ( cap ) {
const text = cap [ 0 ] . replace ( /^ {1,4}/gm , '' ) ;
return {
type : 'code' ,
raw : cap [ 0 ] ,
codeBlockStyle : 'indented' ,
text : ! this . options . pedantic
? rtrim ( text , '\n' )
: text
} ;
}
}
fences ( src ) {
const cap = this . rules . block . fences . exec ( src ) ;
if ( cap ) {
const raw = cap [ 0 ] ;
const text = indentCodeCompensation ( raw , cap [ 3 ] || '' ) ;
return {
type : 'code' ,
raw ,
lang : cap [ 2 ] ? cap [ 2 ] . trim ( ) . replace ( this . rules . inline . _escapes , '$1' ) : cap [ 2 ] ,
text
} ;
}
}
heading ( src ) {
const cap = this . rules . block . heading . exec ( src ) ;
if ( cap ) {
let text = cap [ 2 ] . trim ( ) ;
// remove trailing #s
if ( /#$/ . test ( text ) ) {
const trimmed = rtrim ( text , '#' ) ;
if ( this . options . pedantic ) {
text = trimmed . trim ( ) ;
} else if ( ! trimmed || / $/ . test ( trimmed ) ) {
// CommonMark requires space before trailing #s
text = trimmed . trim ( ) ;
}
}
return {
type : 'heading' ,
raw : cap [ 0 ] ,
depth : cap [ 1 ] . length ,
text ,
tokens : this . lexer . inline ( text )
} ;
}
}
hr ( src ) {
const cap = this . rules . block . hr . exec ( src ) ;
if ( cap ) {
return {
type : 'hr' ,
raw : cap [ 0 ]
} ;
}
}
blockquote ( src ) {
const cap = this . rules . block . blockquote . exec ( src ) ;
if ( cap ) {
const text = cap [ 0 ] . replace ( /^ *>[ \t]?/gm , '' ) ;
const top = this . lexer . state . top ;
this . lexer . state . top = true ;
const tokens = this . lexer . blockTokens ( text ) ;
this . lexer . state . top = top ;
return {
type : 'blockquote' ,
raw : cap [ 0 ] ,
tokens ,
text
} ;
}
}
list ( src ) {
let cap = this . rules . block . list . exec ( src ) ;
if ( cap ) {
let raw , istask , ischecked , indent , i , blankLine , endsWithBlankLine ,
line , nextLine , rawLine , itemContents , endEarly ;
let bull = cap [ 1 ] . trim ( ) ;
const isordered = bull . length > 1 ;
const list = {
type : 'list' ,
raw : '' ,
ordered : isordered ,
start : isordered ? + bull . slice ( 0 , - 1 ) : '' ,
loose : false ,
items : [ ]
} ;
bull = isordered ? `\\d{1,9}\\${bull.slice(-1)}` : `\\${bull}`;
if ( this . options . pedantic ) {
bull = isordered ? bull : '[*+-]' ;
}
// Get next list item
const itemRegex = new RegExp(`^( {0,3}${bull})((?:[ \t][^\\n]*)?(?:\\n|$))`);
// Check if current bullet point can start a new List Item
while ( src ) {
endEarly = false ;
if ( ! ( cap = itemRegex . exec ( src ) ) ) {
break ;
}
if ( this . rules . block . hr . test ( src ) ) { // End list if bullet was actually HR (possibly move into itemRegex?)
break ;
}
raw = cap [ 0 ] ;
src = src . substring ( raw . length ) ;
line = cap [ 2 ] . split ( '\n' , 1 ) [ 0 ] . replace ( /^\t+/ , ( t ) => ' ' . repeat ( 3 * t . length ) ) ;
nextLine = src . split ( '\n' , 1 ) [ 0 ] ;
if ( this . options . pedantic ) {
indent = 2 ;
itemContents = line . trimLeft ( ) ;
} else {
indent = cap [ 2 ] . search ( /[^ ]/ ) ; // Find first non-space char
indent = indent > 4 ? 1 : indent ; // Treat indented code blocks (> 4 spaces) as having only 1 indent
itemContents = line . slice ( indent ) ;
indent += cap [ 1 ] . length ;
}
blankLine = false ;
if ( ! line && /^ *$/ . test ( nextLine ) ) { // Items begin with at most one blank line
raw += nextLine + '\n' ;
src = src . substring ( nextLine . length + 1 ) ;
endEarly = true ;
}
if ( ! endEarly ) {
const nextBulletRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`);
const hrRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`);
const fencesBeginRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:\`\`\`|~~~)`);
const headingBeginRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}#`);
// Check if following lines should be included in List Item
while ( src ) {
rawLine = src . split ( '\n' , 1 ) [ 0 ] ;
nextLine = rawLine ;
// Re-align to follow commonmark nesting rules
if ( this . options . pedantic ) {
nextLine = nextLine . replace ( /^ {1,4}(?=( {4})*[^ ])/g , ' ' ) ;
}
// End list item if found code fences
if ( fencesBeginRegex . test ( nextLine ) ) {
break ;
}
// End list item if found start of new heading
if ( headingBeginRegex . test ( nextLine ) ) {
break ;
}
// End list item if found start of new bullet
if ( nextBulletRegex . test ( nextLine ) ) {
break ;
}
// Horizontal rule found
if ( hrRegex . test ( src ) ) {
break ;
}
if ( nextLine . search ( /[^ ]/ ) >= indent || ! nextLine . trim ( ) ) { // Dedent if possible
itemContents += '\n' + nextLine . slice ( indent ) ;
} else {
// not enough indentation
if ( blankLine ) {
break ;
}
// paragraph continuation unless last line was a different block level element
if ( line . search ( /[^ ]/ ) >= 4 ) { // indented code block
break ;
}
if ( fencesBeginRegex . test ( line ) ) {
break ;
}
if ( headingBeginRegex . test ( line ) ) {
break ;
}
if ( hrRegex . test ( line ) ) {
break ;
}
itemContents += '\n' + nextLine ;
}
if ( ! blankLine && ! nextLine . trim ( ) ) { // Check if current line is blank
blankLine = true ;
}
raw += rawLine + '\n' ;
src = src . substring ( rawLine . length + 1 ) ;
line = nextLine . slice ( indent ) ;
}
}
if ( ! list . loose ) {
// If the previous item ended with a blank line, the list is loose
if ( endsWithBlankLine ) {
list . loose = true ;
} else if ( /\n *\n *$/ . test ( raw ) ) {
endsWithBlankLine = true ;
}
}
// Check for task list items
if ( this . options . gfm ) {
istask = /^\[[ xX]\] / . exec ( itemContents ) ;
if ( istask ) {
ischecked = istask [ 0 ] !== '[ ] ' ;
itemContents = itemContents . replace ( /^\[[ xX]\] +/ , '' ) ;
}
}
list . items . push ( {
type : 'list_item' ,
raw ,
task : ! ! istask ,
checked : ischecked ,
loose : false ,
text : itemContents
} ) ;
list . raw += raw ;
}
// Do not consume newlines at end of final item. Alternatively, make itemRegex *start* with any newlines to simplify/speed up endsWithBlankLine logic
list . items [ list . items . length - 1 ] . raw = raw . trimRight ( ) ;
list . items [ list . items . length - 1 ] . text = itemContents . trimRight ( ) ;
list . raw = list . raw . trimRight ( ) ;
const l = list . items . length ;
// Item child tokens handled here at end because we needed to have the final item to trim it first
for ( i = 0 ; i < l ; i ++ ) {
this . lexer . state . top = false ;
list . items [ i ] . tokens = this . lexer . blockTokens ( list . items [ i ] . text , [ ] ) ;
if ( ! list . loose ) {
// Check if list should be loose
const spacers = list . items [ i ] . tokens . filter ( t => t . type === 'space' ) ;
const hasMultipleLineBreaks = spacers.length > 0 && spacers.some(t => /\n.*\n/.test(t.raw));
list . loose = hasMultipleLineBreaks ;
}
}
// Set all items to loose if list is loose
if ( list . loose ) {
for ( i = 0 ; i < l ; i ++ ) {
list . items [ i ] . loose = true ;
}
}
return list ;
}
}
html ( src ) {
const cap = this . rules . block . html . exec ( src ) ;
if ( cap ) {
const token = {
type : 'html' ,
raw : cap [ 0 ] ,
pre : ! this . options . sanitizer
&& ( cap [ 1 ] === 'pre' || cap [ 1 ] === 'script' || cap [ 1 ] === 'style' ) ,
text : cap [ 0 ]
} ;
if ( this . options . sanitize ) {
const text = this . options . sanitizer ? this . options . sanitizer ( cap [ 0 ] ) : escape ( cap [ 0 ] ) ;
token . type = 'paragraph' ;
token . text = text ;
token . tokens = this . lexer . inline ( text ) ;
}
return token ;
}
}
def ( src ) {
const cap = this . rules . block . def . exec ( src ) ;
if ( cap ) {
const tag = cap [ 1 ] . toLowerCase ( ) . replace ( /\s+/g , ' ' ) ;
const href = cap [ 2 ] ? cap [ 2 ] . replace ( /^<(.*)>$/ , '$1' ) . replace ( this . rules . inline . _escapes , '$1' ) : '' ;
const title = cap [ 3 ] ? cap [ 3 ] . substring ( 1 , cap [ 3 ] . length - 1 ) . replace ( this . rules . inline . _escapes , '$1' ) : cap [ 3 ] ;
return {
type : 'def' ,
tag ,
raw : cap [ 0 ] ,
href ,
title
} ;
}
}
table ( src ) {
const cap = this . rules . block . table . exec ( src ) ;
if ( cap ) {
const item = {
type : 'table' ,
header : splitCells ( cap [ 1 ] ) . map ( c => { return { text : c } ; } ) ,
align : cap [ 2 ] . replace ( /^ *|\| *$/g , '' ) . split ( / *\| */ ) ,
rows : cap [ 3 ] && cap [ 3 ] . trim ( ) ? cap [ 3 ] . replace ( /\n[ \t]*$/ , '' ) . split ( '\n' ) : [ ]
} ;
if ( item . header . length === item . align . length ) {
item . raw = cap [ 0 ] ;
let l = item . align . length ;
let i , j , k , row ;
for ( i = 0 ; i < l ; i ++ ) {
if ( /^ *-+: *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'right' ;
} else if ( /^ *:-+: *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'center' ;
} else if ( /^ *:-+ *$/ . test ( item . align [ i ] ) ) {
item . align [ i ] = 'left' ;
} else {
item . align [ i ] = null ;
}
}
l = item . rows . length ;
for ( i = 0 ; i < l ; i ++ ) {
item . rows [ i ] = splitCells ( item . rows [ i ] , item . header . length ) . map ( c => { return { text : c } ; } ) ;
}
// parse child tokens inside headers and cells
// header child tokens
l = item . header . length ;
for ( j = 0 ; j < l ; j ++ ) {
item . header [ j ] . tokens = this . lexer . inline ( item . header [ j ] . text ) ;
}
// cell child tokens
l = item . rows . length ;
for ( j = 0 ; j < l ; j ++ ) {
row = item . rows [ j ] ;
for ( k = 0 ; k < row . length ; k ++ ) {
row [ k ] . tokens = this . lexer . inline ( row [ k ] . text ) ;
}
}
return item ;
}
}
}
lheading ( src ) {
const cap = this . rules . block . lheading . exec ( src ) ;
if ( cap ) {
return {
type : 'heading' ,
raw : cap [ 0 ] ,
depth : cap [ 2 ] . charAt ( 0 ) === '=' ? 1 : 2 ,
text : cap [ 1 ] ,
tokens : this . lexer . inline ( cap [ 1 ] )
} ;
}
}
paragraph ( src ) {
const cap = this . rules . block . paragraph . exec ( src ) ;
if ( cap ) {
const text = cap [ 1 ] . charAt ( cap [ 1 ] . length - 1 ) === '\n'
? cap [ 1 ] . slice ( 0 , - 1 )
: cap [ 1 ] ;
return {
type : 'paragraph' ,
raw : cap [ 0 ] ,
text ,
tokens : this . lexer . inline ( text )
} ;
}
}
text ( src ) {
const cap = this . rules . block . text . exec ( src ) ;
if ( cap ) {
return {
type : 'text' ,
raw : cap [ 0 ] ,
text : cap [ 0 ] ,
tokens : this . lexer . inline ( cap [ 0 ] )
} ;
}
}
escape ( src ) {
const cap = this . rules . inline . escape . exec ( src ) ;
if ( cap ) {
return {
type : 'escape' ,
raw : cap [ 0 ] ,
text : escape ( cap [ 1 ] )
} ;
}
}
tag ( src ) {
const cap = this . rules . inline . tag . exec ( src ) ;
if ( cap ) {
if ( ! this . lexer . state . inLink && /^<a /i . test ( cap [ 0 ] ) ) {
this . lexer . state . inLink = true ;
} else if ( this . lexer . state . inLink && /^<\/a>/i . test ( cap [ 0 ] ) ) {
this . lexer . state . inLink = false ;
}
if ( ! this . lexer . state . inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i . test ( cap [ 0 ] ) ) {
this . lexer . state . inRawBlock = true ;
} else if ( this . lexer . state . inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i . test ( cap [ 0 ] ) ) {
this . lexer . state . inRawBlock = false ;
}
return {
type : this . options . sanitize
? 'text'
: 'html' ,
raw : cap [ 0 ] ,
inLink : this . lexer . state . inLink ,
inRawBlock : this . lexer . state . inRawBlock ,
text : this . options . sanitize
? ( this . options . sanitizer
? this . options . sanitizer ( cap [ 0 ] )
: escape ( cap [ 0 ] ) )
: cap [ 0 ]
} ;
}
}
link ( src ) {
const cap = this . rules . inline . link . exec ( src ) ;
if ( cap ) {
const trimmedUrl = cap [ 2 ] . trim ( ) ;
if ( ! this . options . pedantic && /^</ . test ( trimmedUrl ) ) {
// commonmark requires matching angle brackets
if ( ! ( />$/ . test ( trimmedUrl ) ) ) {
return ;
}
// ending angle bracket cannot be escaped
const rtrimSlash = rtrim ( trimmedUrl . slice ( 0 , - 1 ) , '\\' ) ;
if ( ( trimmedUrl . length - rtrimSlash . length ) % 2 === 0 ) {
return ;
}
} else {
// find closing parenthesis
const lastParenIndex = findClosingBracket ( cap [ 2 ] , '()' ) ;
if ( lastParenIndex > - 1 ) {
const start = cap [ 0 ] . indexOf ( '!' ) === 0 ? 5 : 4 ;
const linkLen = start + cap [ 1 ] . length + lastParenIndex ;
cap [ 2 ] = cap [ 2 ] . substring ( 0 , lastParenIndex ) ;
cap [ 0 ] = cap [ 0 ] . substring ( 0 , linkLen ) . trim ( ) ;
cap [ 3 ] = '' ;
}
}
let href = cap [ 2 ] ;
let title = '' ;
if ( this . options . pedantic ) {
// split pedantic href and title
const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/ . exec ( href ) ;
if ( link ) {
href = link [ 1 ] ;
title = link [ 3 ] ;
}
} else {
title = cap [ 3 ] ? cap [ 3 ] . slice ( 1 , - 1 ) : '' ;
}
href = href . trim ( ) ;
if ( /^</ . test ( href ) ) {
if ( this . options . pedantic && ! ( />$/ . test ( trimmedUrl ) ) ) {
// pedantic allows starting angle bracket without ending angle bracket
href = href . slice ( 1 ) ;
} else {
href = href . slice ( 1 , - 1 ) ;
}
}
return outputLink ( cap , {
href : href ? href . replace ( this . rules . inline . _escapes , '$1' ) : href ,
title : title ? title . replace ( this . rules . inline . _escapes , '$1' ) : title
} , cap [ 0 ] , this . lexer ) ;
}
}
reflink ( src , links ) {
let cap ;
if ( ( cap = this . rules . inline . reflink . exec ( src ) )
|| ( cap = this . rules . inline . nolink . exec ( src ) ) ) {
let link = ( cap [ 2 ] || cap [ 1 ] ) . replace ( /\s+/g , ' ' ) ;
link = links [ link . toLowerCase ( ) ] ;
if ( ! link ) {
const text = cap [ 0 ] . charAt ( 0 ) ;
return {
type : 'text' ,
raw : text ,
text
} ;
}
return outputLink ( cap , link , cap [ 0 ] , this . lexer ) ;
}
}
emStrong ( src , maskedSrc , prevChar = '' ) {
let match = this . rules . inline . emStrong . lDelim . exec ( src ) ;
if ( ! match ) return ;
// _ can't be between two alphanumerics. \p{L}\p{N} includes non-english alphabet/numbers as well
if ( match [ 3 ] && prevChar . match ( /[\p{L}\p{N}]/u ) ) return ;
const nextChar = match [ 1 ] || match [ 2 ] || '' ;
if ( ! nextChar || ( nextChar && ( prevChar === '' || this . rules . inline . punctuation . exec ( prevChar ) ) ) ) {
const lLength = match [ 0 ] . length - 1 ;
let rDelim , rLength , delimTotal = lLength , midDelimTotal = 0 ;
const endReg = match [ 0 ] [ 0 ] === '*' ? this . rules . inline . emStrong . rDelimAst : this . rules . inline . emStrong . rDelimUnd ;
endReg . lastIndex = 0 ;
// Clip maskedSrc to same section of string as src (move to lexer?)
maskedSrc = maskedSrc . slice ( - 1 * src . length + lLength ) ;
while ( ( match = endReg . exec ( maskedSrc ) ) != null ) {
rDelim = match [ 1 ] || match [ 2 ] || match [ 3 ] || match [ 4 ] || match [ 5 ] || match [ 6 ] ;
if ( ! rDelim ) continue ; // skip single * in __abc*abc__
rLength = rDelim . length ;
if ( match [ 3 ] || match [ 4 ] ) { // found another Left Delim
delimTotal += rLength ;
continue ;
} else if ( match [ 5 ] || match [ 6 ] ) { // either Left or Right Delim
if ( lLength % 3 && ! ( ( lLength + rLength ) % 3 ) ) {
midDelimTotal += rLength ;
continue ; // CommonMark Emphasis Rules 9-10
}
}
delimTotal -= rLength ;
if ( delimTotal > 0 ) continue ; // Haven't found enough closing delimiters
// Remove extra characters. *a*** -> *a*
rLength = Math . min ( rLength , rLength + delimTotal + midDelimTotal ) ;
const raw = src . slice ( 0 , lLength + match . index + ( match [ 0 ] . length - rDelim . length ) + rLength ) ;
// Create `em` if smallest delimiter has odd char count. *a***
if ( Math . min ( lLength , rLength ) % 2 ) {
const text = raw . slice ( 1 , - 1 ) ;
return {
type : 'em' ,
raw ,
text ,
tokens : this . lexer . inlineTokens ( text )
} ;
}
// Create 'strong' if smallest delimiter has even char count. **a***
const text = raw . slice ( 2 , - 2 ) ;
return {
type : 'strong' ,
raw ,
text ,
tokens : this . lexer . inlineTokens ( text )
} ;
}
}
}
codespan ( src ) {
const cap = this . rules . inline . code . exec ( src ) ;
if ( cap ) {
let text = cap [ 2 ] . replace ( /\n/g , ' ' ) ;
const hasNonSpaceChars = /[^ ]/ . test ( text ) ;
const hasSpaceCharsOnBothEnds = /^ / . test ( text ) && / $/ . test ( text ) ;
if ( hasNonSpaceChars && hasSpaceCharsOnBothEnds ) {
text = text . substring ( 1 , text . length - 1 ) ;
}
text = escape ( text , true ) ;
return {
type : 'codespan' ,
raw : cap [ 0 ] ,
text
} ;
}
}
br ( src ) {
const cap = this . rules . inline . br . exec ( src ) ;
if ( cap ) {
return {
type : 'br' ,
raw : cap [ 0 ]
} ;
}
}
del ( src ) {
const cap = this . rules . inline . del . exec ( src ) ;
if ( cap ) {
return {
type : 'del' ,
raw : cap [ 0 ] ,
text : cap [ 2 ] ,
tokens : this . lexer . inlineTokens ( cap [ 2 ] )
} ;
}
}
autolink ( src , mangle ) {
const cap = this . rules . inline . autolink . exec ( src ) ;
if ( cap ) {
let text , href ;
if ( cap [ 2 ] === '@' ) {
text = escape ( this . options . mangle ? mangle ( cap [ 1 ] ) : cap [ 1 ] ) ;
href = 'mailto:' + text ;
} else {
text = escape ( cap [ 1 ] ) ;
href = text ;
}
return {
type : 'link' ,
raw : cap [ 0 ] ,
text ,
href ,
tokens : [
{
type : 'text' ,
raw : text ,
text
}
]
} ;
}
}
url ( src , mangle ) {
let cap ;
if ( cap = this . rules . inline . url . exec ( src ) ) {
let text , href ;
if ( cap [ 2 ] === '@' ) {
text = escape ( this . options . mangle ? mangle ( cap [ 0 ] ) : cap [ 0 ] ) ;
href = 'mailto:' + text ;
} else {
// do extended autolink path validation
let prevCapZero ;
do {
prevCapZero = cap [ 0 ] ;
cap [ 0 ] = this . rules . inline . _backpedal . exec ( cap [ 0 ] ) [ 0 ] ;
} while ( prevCapZero !== cap [ 0 ] ) ;
text = escape ( cap [ 0 ] ) ;
if ( cap [ 1 ] === 'www.' ) {
href = 'http://' + cap [ 0 ] ;
} else {
href = cap [ 0 ] ;
}
}
return {
type : 'link' ,
raw : cap [ 0 ] ,
text ,
href ,
tokens : [
{
type : 'text' ,
raw : text ,
text
}
]
} ;
}
}
inlineText ( src , smartypants ) {
const cap = this . rules . inline . text . exec ( src ) ;
if ( cap ) {
let text ;
if ( this . lexer . state . inRawBlock ) {
text = this . options . sanitize ? ( this . options . sanitizer ? this . options . sanitizer ( cap [ 0 ] ) : escape ( cap [ 0 ] ) ) : cap [ 0 ] ;
} else {
text = escape ( this . options . smartypants ? smartypants ( cap [ 0 ] ) : cap [ 0 ] ) ;
}
return {
type : 'text' ,
raw : cap [ 0 ] ,
text
} ;
}
}
}
/**
 * Block-Level Grammar
 */
const block = {
newline : /^(?: *(?:\n|$))+/ ,
code : /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/ ,
fences : /^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/ ,
hr : /^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/ ,
heading : /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/ ,
blockquote : /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/ ,
list : /^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/ ,
html : '^ {0,3}(?:' // optional indentation
+ '<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
+ '|comment[^\\n]*(\\n+|$)' // (2)
+ '|<\\?[\\s\\S]*?(?:\\?>\\n*|$)' // (3)
+ '|<![A-Z][\\s\\S]*?(?:>\\n*|$)' // (4)
+ '|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)' // (5)
+ '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (6)
+ '|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) open tag
+ '|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag
+ ')' ,
def : /^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/ ,
table : noopTest ,
lheading : /^((?:.|\n(?!\n))+?)\n {0,3}(=+|-+) *(?:\n+|$)/ ,
// regex template, placeholders will be replaced according to different paragraph
// interruption rules of commonmark and the original markdown spec:
_paragraph : /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/ ,
text : /^[^\n]+/
} ;
block . _label = /(?!\s*\])(?:\\.|[^\[\]\\])+/ ;
block . _title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/ ;
block . def = edit ( block . def )
. replace ( 'label' , block . _label )
. replace ( 'title' , block . _title )
. getRegex ( ) ;
block . bullet = /(?:[*+-]|\d{1,9}[.)])/ ;
block . listItemStart = edit ( /^( *)(bull) */ )
. replace ( 'bull' , block . bullet )
. getRegex ( ) ;
block . list = edit ( block . list )
. replace ( /bull/g , block . bullet )
. replace ( 'hr' , '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))' )
. replace ( 'def' , '\\n+(?=' + block . def . source + ')' )
. getRegex ( ) ;
block . _tag = 'address|article|aside|base|basefont|blockquote|body|caption'
+ '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
+ '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
+ '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
+ '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
+ '|track|ul' ;
block . _comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/ ;
block . html = edit ( block . html , 'i' )
. replace ( 'comment' , block . _comment )
. replace ( 'tag' , block . _tag )
. replace ( 'attribute' , / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/ )
. getRegex ( ) ;
block . paragraph = edit ( block . _paragraph )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' {0,3}#{1,6} ' )
. replace ( '|lheading' , '' ) // setex headings don't interrupt commonmark paragraphs
. replace ( '|table' , '' )
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( 'fences' , ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n' )
. replace ( 'list' , ' {0,3}(?:[*+-]|1[.)]) ' ) // only lists starting from 1 can interrupt
. replace ( 'html' , '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)' )
. replace ( 'tag' , block . _tag ) // pars can be interrupted by type (6) html blocks
. getRegex ( ) ;
block . blockquote = edit ( block . blockquote )
. replace ( 'paragraph' , block . paragraph )
. getRegex ( ) ;
/**
 * Normal Block Grammar
 */
block . normal = { ... block } ;
/**
 * GFM Block Grammar
 */
block . gfm = {
... block . normal ,
table : '^ *([^\\n ].*\\|.*)\\n' // Header
+ ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?' // Align
+ '(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
} ;
block . gfm . table = edit ( block . gfm . table )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' {0,3}#{1,6} ' )
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( 'code' , ' {4}[^\\n]' )
. replace ( 'fences' , ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n' )
. replace ( 'list' , ' {0,3}(?:[*+-]|1[.)]) ' ) // only lists starting from 1 can interrupt
. replace ( 'html' , '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)' )
. replace ( 'tag' , block . _tag ) // tables can be interrupted by type (6) html blocks
. getRegex ( ) ;
block . gfm . paragraph = edit ( block . _paragraph )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' {0,3}#{1,6} ' )
. replace ( '|lheading' , '' ) // setex headings don't interrupt commonmark paragraphs
. replace ( 'table' , block . gfm . table ) // interrupt paragraphs with table
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( 'fences' , ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n' )
. replace ( 'list' , ' {0,3}(?:[*+-]|1[.)]) ' ) // only lists starting from 1 can interrupt
. replace ( 'html' , '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)' )
. replace ( 'tag' , block . _tag ) // pars can be interrupted by type (6) html blocks
. getRegex ( ) ;
/**
 * Pedantic grammar (original John Gruber's loose markdown specification)
 */
block . pedantic = {
... block . normal ,
html : edit (
'^ *(?:comment *(?:\\n|\\s*$)'
+ '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
+ '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))' )
. replace ( 'comment' , block . _comment )
. replace ( /tag/g , '(?!(?:'
+ 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
+ '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
+ '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b' )
. getRegex ( ) ,
def : /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/ ,
heading : /^(#{1,6})(.*)(?:\n+|$)/ ,
fences : noopTest , // fences not supported
lheading : /^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/ ,
paragraph : edit ( block . normal . _paragraph )
. replace ( 'hr' , block . hr )
. replace ( 'heading' , ' *#{1,6} *[^\n]' )
. replace ( 'lheading' , block . lheading )
. replace ( 'blockquote' , ' {0,3}>' )
. replace ( '|fences' , '' )
. replace ( '|list' , '' )
. replace ( '|html' , '' )
. getRegex ( )
} ;
/**
 * Inline-Level Grammar
 */
const inline = {
escape : /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/ ,
autolink : /^<(scheme:[^\s\x00-\x1f<>]*|email)>/ ,
url : noopTest ,
tag : '^comment'
+ '|^</[a-zA-Z][\\w:-]*\\s*>' // self-closing tag
+ '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
+ '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
+ '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
+ '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>' , // CDATA section
link : /^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/ ,
reflink : /^!?\[(label)\]\[(ref)\]/ ,
nolink : /^!?\[(ref)\](?:\[\])?/ ,
reflinkSearch : 'reflink|nolink(?!\\()' ,
emStrong : {
lDelim : /^(?:\*+(?:([punct_])|[^\s*]))|^_+(?:([punct*])|([^\s_]))/ ,
// (1) and (2) can only be a Right Delimiter. (3) and (4) can only be Left. (5) and (6) can be either Left or Right.
// () Skip orphan inside strong () Consume to delim (1) #*** (2) a***#, a*** (3) #***a, ***a (4) ***# (5) #***# (6) a***a
rDelimAst : /^(?:[^_*\\]|\\.)*?\_\_(?:[^_*\\]|\\.)*?\*(?:[^_*\\]|\\.)*?(?=\_\_)|(?:[^*\\]|\\.)+(?=[^*])|[punct_](\*+)(?=[\s]|$)|(?:[^punct*_\s\\]|\\.)(\*+)(?=[punct_\s]|$)|[punct_\s](\*+)(?=[^punct*_\s])|[\s](\*+)(?=[punct_])|[punct_](\*+)(?=[punct_])|(?:[^punct*_\s\\]|\\.)(\*+)(?=[^punct*_\s])/ ,
rDelimUnd : /^(?:[^_*\\]|\\.)*?\*\*(?:[^_*\\]|\\.)*?\_(?:[^_*\\]|\\.)*?(?=\*\*)|(?:[^_\\]|\\.)+(?=[^_])|[punct*](\_+)(?=[\s]|$)|(?:[^punct*_\s\\]|\\.)(\_+)(?=[punct*\s]|$)|[punct*\s](\_+)(?=[^punct*_\s])|[\s](\_+)(?=[punct*])|[punct*](\_+)(?=[punct*])/ // ^- Not allowed for _
} ,
code : /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/ ,
br : /^( {2,}|\\)\n(?!\s*$)/ ,
del : noopTest ,
text : /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/ ,
punctuation : /^([\spunctuation])/
} ;
// list of punctuation marks from CommonMark spec
// without * and _ to handle the different emphasis markers * and _
inline . _punctuation = '!"#$%&\'()+\\-.,/:;<=>?@\\[\\]`^{|}~' ;
inline . punctuation = edit ( inline . punctuation ) . replace ( /punctuation/g , inline . _punctuation ) . getRegex ( ) ;
// sequences em should skip over [title](link), `code`, <html>
inline . blockSkip = /\[[^\]]*?\]\([^\)]*?\)|`[^`]*?`|<[^>]*?>/g ;
// lookbehind is not available on Safari as of version 16
// inline.escapedEmSt = /(?<=(?:^|[^\\)(?:\\[^])*)\\[*_]/g;
inline . escapedEmSt = /(?:^|[^\\])(?:\\\\)*\\[*_]/g ;
inline . _comment = edit ( block . _comment ) . replace ( '(?:-->|$)' , '-->' ) . getRegex ( ) ;
inline . emStrong . lDelim = edit ( inline . emStrong . lDelim )
. replace ( /punct/g , inline . _punctuation )
. getRegex ( ) ;
inline . emStrong . rDelimAst = edit ( inline . emStrong . rDelimAst , 'g' )
. replace ( /punct/g , inline . _punctuation )
. getRegex ( ) ;
inline . emStrong . rDelimUnd = edit ( inline . emStrong . rDelimUnd , 'g' )
. replace ( /punct/g , inline . _punctuation )
. getRegex ( ) ;
inline . _escapes = /\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g ;
inline . _scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/ ;
inline . _email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/ ;
inline . autolink = edit ( inline . autolink )
. replace ( 'scheme' , inline . _scheme )
. replace ( 'email' , inline . _email )
. getRegex ( ) ;
inline . _attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/ ;
inline . tag = edit ( inline . tag )
. replace ( 'comment' , inline . _comment )
. replace ( 'attribute' , inline . _attribute )
. getRegex ( ) ;
inline . _label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/ ;
inline . _href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/ ;
inline . _title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/ ;
inline . link = edit ( inline . link )
. replace ( 'label' , inline . _label )
. replace ( 'href' , inline . _href )
. replace ( 'title' , inline . _title )
. getRegex ( ) ;
inline . reflink = edit ( inline . reflink )
. replace ( 'label' , inline . _label )
. replace ( 'ref' , block . _label )
. getRegex ( ) ;
inline . nolink = edit ( inline . nolink )
. replace ( 'ref' , block . _label )
. getRegex ( ) ;
inline . reflinkSearch = edit ( inline . reflinkSearch , 'g' )
. replace ( 'reflink' , inline . reflink )
. replace ( 'nolink' , inline . nolink )
. getRegex ( ) ;
/**
 * Normal Inline Grammar
 */
inline . normal = { ... inline } ;
/**
 * Pedantic Inline Grammar
 */
inline . pedantic = {
... inline . normal ,
strong : {
start : /^__|\*\*/ ,
middle : /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/ ,
endAst : /\*\*(?!\*)/g ,
endUnd : /__(?!_)/g
} ,
em : {
start : /^_|\*/ ,
middle : /^()\*(?=\S)([\s\S]*?\S)\*(?!\*)|^_(?=\S)([\s\S]*?\S)_(?!_)/ ,
endAst : /\*(?!\*)/g ,
endUnd : /_(?!_)/g
} ,
link : edit ( /^!?\[(label)\]\((.*?)\)/ )
. replace ( 'label' , inline . _label )
. getRegex ( ) ,
reflink : edit ( /^!?\[(label)\]\s*\[([^\]]*)\]/ )
. replace ( 'label' , inline . _label )
. getRegex ( )
} ;
/**
 * GFM Inline Grammar
 */
inline . gfm = {
... inline . normal ,
escape : edit ( inline . escape ) . replace ( '])' , '~|])' ) . getRegex ( ) ,
_extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,
url : /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/ ,
_backpedal : /(?:[^?!.,:;*_'"~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'"~)]+(?!$))+/ ,
del : /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/ ,
text : /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/
} ;
inline . gfm . url = edit ( inline . gfm . url , 'i' )
.replace('email', inline.gfm._extended_email)
. getRegex ( ) ;
/**
 * GFM + Line Breaks Inline Grammar
 */
inline . breaks = {
... inline . gfm ,
br : edit ( inline . br ) . replace ( '{2,}' , '*' ) . getRegex ( ) ,
text : edit ( inline . gfm . text )
. replace ( '\\b_' , '\\b_| {2,}\\n' )
. replace ( /\{2,\}/g , '*' )
. getRegex ( )
} ;
/**
 * smartypants text replacement
 * @param {string} text
 */
function smartypants ( text ) {
return text
// em-dashes
. replace ( /---/g , '\u2014' )
// en-dashes
. replace ( /--/g , '\u2013' )
// opening singles
. replace ( /(^|[-\u2014/(\[{"\s])'/g , '$1\u2018' )
// closing singles & apostrophes
. replace ( /'/g , '\u2019' )
// opening doubles
. replace ( /(^|[-\u2014/(\[{\u2018\s])"/g , '$1\u201c' )
// closing doubles
. replace ( /"/g , '\u201d' )
// ellipses
. replace ( /\.{3}/g , '\u2026' ) ;
}
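/*
 * Illustrative sketch (example text assumed):
 *
 *   smartypants('"Hello" -- it\'s time...')
 *   // => '\u201cHello\u201d \u2013 it\u2019s time\u2026'
 *   //    (curly quotes, en dash, apostrophe, ellipsis)
 */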
/**
 * mangle email addresses
 * @param {string} text
 */
function mangle ( text ) {
let out = '' ,
i ,
ch ;
const l = text . length ;
for ( i = 0 ; i < l ; i ++ ) {
ch = text . charCodeAt ( i ) ;
if ( Math . random ( ) > 0.5 ) {
ch = 'x' + ch . toString ( 16 ) ;
}
out += '&#' + ch + ';' ;
}
return out ;
}
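/*
 * Illustrative sketch (example address assumed): each character becomes a decimal or
 * hexadecimal entity, chosen at random, e.g.
 *
 *   mangle('a@b')   // => '&#97;&#x40;&#98;' (exact mix of decimal/hex varies per call)
 */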
/**
 * Block Lexer
 */
class Lexer {
constructor ( options ) {
this . tokens = [ ] ;
this . tokens . links = Object . create ( null ) ;
this . options = options || defaults ;
this . options . tokenizer = this . options . tokenizer || new Tokenizer ( ) ;
this . tokenizer = this . options . tokenizer ;
this . tokenizer . options = this . options ;
this . tokenizer . lexer = this ;
this . inlineQueue = [ ] ;
this . state = {
inLink : false ,
inRawBlock : false ,
top : true
} ;
const rules = {
block : block . normal ,
inline : inline . normal
} ;
if ( this . options . pedantic ) {
rules . block = block . pedantic ;
rules . inline = inline . pedantic ;
} else if ( this . options . gfm ) {
rules . block = block . gfm ;
if ( this . options . breaks ) {
rules . inline = inline . breaks ;
} else {
rules . inline = inline . gfm ;
}
}
this . tokenizer . rules = rules ;
}
/**
 * Expose Rules
 */
static get rules ( ) {
return {
block ,
inline
} ;
}
/**
 * Static Lex Method
 */
static lex ( src , options ) {
const lexer = new Lexer ( options ) ;
return lexer . lex ( src ) ;
}
/**
 * Static Lex Inline Method
 */
static lexInline ( src , options ) {
const lexer = new Lexer ( options ) ;
return lexer . inlineTokens ( src ) ;
}
/**
 * Preprocessing
 */
lex ( src ) {
src = src
. replace ( /\r\n|\r/g , '\n' ) ;
this . blockTokens ( src , this . tokens ) ;
let next ;
while ( next = this . inlineQueue . shift ( ) ) {
this . inlineTokens ( next . src , next . tokens ) ;
}
return this . tokens ;
}
/**
 * Lexing
 */
blockTokens ( src , tokens = [ ] ) {
if (this.options.pedantic) {
  src = src.replace(/\t/g, '    ').replace(/^ +$/gm, '');
} else {
  src = src.replace(/^( *)(\t+)/gm, (_, leading, tabs) => {
    return leading + '    '.repeat(tabs.length);
  });
}
let token , lastToken , cutSrc , lastParagraphClipped ;
while ( src ) {
if ( this . options . extensions
&& this . options . extensions . block
&& this . options . extensions . block . some ( ( extTokenizer ) => {
if ( token = extTokenizer . call ( { lexer : this } , src , tokens ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
return true ;
}
return false ;
} ) ) {
continue ;
}
// newline
if ( token = this . tokenizer . space ( src ) ) {
src = src . substring ( token . raw . length ) ;
if ( token . raw . length === 1 && tokens . length > 0 ) {
// if there's a single \n as a spacer, it's terminating the last line,
// so move it there so that we don't get unecessary paragraph tags
tokens [ tokens . length - 1 ] . raw += '\n' ;
} else {
tokens . push ( token ) ;
}
continue ;
}
// code
if ( token = this . tokenizer . code ( src ) ) {
src = src . substring ( token . raw . length ) ;
lastToken = tokens [ tokens . length - 1 ] ;
// An indented code block cannot interrupt a paragraph.
if ( lastToken && ( lastToken . type === 'paragraph' || lastToken . type === 'text' ) ) {
lastToken . raw += '\n' + token . raw ;
lastToken . text += '\n' + token . text ;
this . inlineQueue [ this . inlineQueue . length - 1 ] . src = lastToken . text ;
} else {
tokens . push ( token ) ;
}
continue ;
}
// fences
if ( token = this . tokenizer . fences ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// heading
if ( token = this . tokenizer . heading ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// hr
if ( token = this . tokenizer . hr ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// blockquote
if ( token = this . tokenizer . blockquote ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// list
if ( token = this . tokenizer . list ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// html
if ( token = this . tokenizer . html ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// def
if ( token = this . tokenizer . def ( src ) ) {
src = src . substring ( token . raw . length ) ;
lastToken = tokens [ tokens . length - 1 ] ;
if ( lastToken && ( lastToken . type === 'paragraph' || lastToken . type === 'text' ) ) {
lastToken . raw += '\n' + token . raw ;
lastToken . text += '\n' + token . raw ;
this . inlineQueue [ this . inlineQueue . length - 1 ] . src = lastToken . text ;
} else if ( ! this . tokens . links [ token . tag ] ) {
this . tokens . links [ token . tag ] = {
href : token . href ,
title : token . title
} ;
}
continue ;
}
// table (gfm)
if ( token = this . tokenizer . table ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// lheading
if ( token = this . tokenizer . lheading ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// top-level paragraph
// prevent paragraph consuming extensions by clipping 'src' to extension start
cutSrc = src ;
if ( this . options . extensions && this . options . extensions . startBlock ) {
let startIndex = Infinity ;
const tempSrc = src . slice ( 1 ) ;
let tempStart ;
this . options . extensions . startBlock . forEach ( function ( getStartIndex ) {
tempStart = getStartIndex . call ( { lexer : this } , tempSrc ) ;
if ( typeof tempStart === 'number' && tempStart >= 0 ) { startIndex = Math . min ( startIndex , tempStart ) ; }
} ) ;
if ( startIndex < Infinity && startIndex >= 0 ) {
cutSrc = src . substring ( 0 , startIndex + 1 ) ;
}
}
if ( this . state . top && ( token = this . tokenizer . paragraph ( cutSrc ) ) ) {
lastToken = tokens [ tokens . length - 1 ] ;
if ( lastParagraphClipped && lastToken . type === 'paragraph' ) {
lastToken . raw += '\n' + token . raw ;
lastToken . text += '\n' + token . text ;
this . inlineQueue . pop ( ) ;
this . inlineQueue [ this . inlineQueue . length - 1 ] . src = lastToken . text ;
} else {
tokens . push ( token ) ;
}
lastParagraphClipped = ( cutSrc . length !== src . length ) ;
src = src . substring ( token . raw . length ) ;
continue ;
}
// text
if ( token = this . tokenizer . text ( src ) ) {
src = src . substring ( token . raw . length ) ;
lastToken = tokens [ tokens . length - 1 ] ;
if ( lastToken && lastToken . type === 'text' ) {
lastToken . raw += '\n' + token . raw ;
lastToken . text += '\n' + token . text ;
this . inlineQueue . pop ( ) ;
this . inlineQueue [ this . inlineQueue . length - 1 ] . src = lastToken . text ;
} else {
tokens . push ( token ) ;
}
continue ;
}
if ( src ) {
const errMsg = 'Infinite loop on byte: ' + src . charCodeAt ( 0 ) ;
if ( this . options . silent ) {
console . error ( errMsg ) ;
break ;
} else {
throw new Error ( errMsg ) ;
}
}
}
this . state . top = true ;
return tokens ;
}
inline ( src , tokens = [ ] ) {
this . inlineQueue . push ( { src , tokens } ) ;
return tokens ;
}
/**
 * Lexing/Compiling
 */
inlineTokens ( src , tokens = [ ] ) {
let token , lastToken , cutSrc ;
// String with links masked to avoid interference with em and strong
let maskedSrc = src ;
let match ;
let keepPrevChar , prevChar ;
// Mask out reflinks
if ( this . tokens . links ) {
const links = Object . keys ( this . tokens . links ) ;
if ( links . length > 0 ) {
while ( ( match = this . tokenizer . rules . inline . reflinkSearch . exec ( maskedSrc ) ) != null ) {
if ( links . includes ( match [ 0 ] . slice ( match [ 0 ] . lastIndexOf ( '[' ) + 1 , - 1 ) ) ) {
maskedSrc = maskedSrc . slice ( 0 , match . index ) + '[' + repeatString ( 'a' , match [ 0 ] . length - 2 ) + ']' + maskedSrc . slice ( this . tokenizer . rules . inline . reflinkSearch . lastIndex ) ;
}
}
}
}
// Mask out other blocks
while ( ( match = this . tokenizer . rules . inline . blockSkip . exec ( maskedSrc ) ) != null ) {
maskedSrc = maskedSrc . slice ( 0 , match . index ) + '[' + repeatString ( 'a' , match [ 0 ] . length - 2 ) + ']' + maskedSrc . slice ( this . tokenizer . rules . inline . blockSkip . lastIndex ) ;
}
// Mask out escaped em & strong delimiters
while ( ( match = this . tokenizer . rules . inline . escapedEmSt . exec ( maskedSrc ) ) != null ) {
maskedSrc = maskedSrc . slice ( 0 , match . index + match [ 0 ] . length - 2 ) + '++' + maskedSrc . slice ( this . tokenizer . rules . inline . escapedEmSt . lastIndex ) ;
this . tokenizer . rules . inline . escapedEmSt . lastIndex -- ;
}
while ( src ) {
if ( ! keepPrevChar ) {
prevChar = '' ;
}
keepPrevChar = false ;
// extensions
if ( this . options . extensions
&& this . options . extensions . inline
&& this . options . extensions . inline . some ( ( extTokenizer ) => {
if ( token = extTokenizer . call ( { lexer : this } , src , tokens ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
return true ;
}
return false ;
} ) ) {
continue ;
}
// escape
if ( token = this . tokenizer . escape ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// tag
if ( token = this . tokenizer . tag ( src ) ) {
src = src . substring ( token . raw . length ) ;
lastToken = tokens [ tokens . length - 1 ] ;
if ( lastToken && token . type === 'text' && lastToken . type === 'text' ) {
lastToken . raw += token . raw ;
lastToken . text += token . text ;
} else {
tokens . push ( token ) ;
}
continue ;
}
// link
if ( token = this . tokenizer . link ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// reflink, nolink
if ( token = this . tokenizer . reflink ( src , this . tokens . links ) ) {
src = src . substring ( token . raw . length ) ;
lastToken = tokens [ tokens . length - 1 ] ;
if ( lastToken && token . type === 'text' && lastToken . type === 'text' ) {
lastToken . raw += token . raw ;
lastToken . text += token . text ;
} else {
tokens . push ( token ) ;
}
continue ;
}
// em & strong
if ( token = this . tokenizer . emStrong ( src , maskedSrc , prevChar ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// code
if ( token = this . tokenizer . codespan ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// br
if ( token = this . tokenizer . br ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// del (gfm)
if ( token = this . tokenizer . del ( src ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// autolink
if ( token = this . tokenizer . autolink ( src , mangle ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// url (gfm)
if ( ! this . state . inLink && ( token = this . tokenizer . url ( src , mangle ) ) ) {
src = src . substring ( token . raw . length ) ;
tokens . push ( token ) ;
continue ;
}
// text
// prevent inlineText consuming extensions by clipping 'src' to extension start
cutSrc = src ;
if ( this . options . extensions && this . options . extensions . startInline ) {
let startIndex = Infinity ;
const tempSrc = src . slice ( 1 ) ;
let tempStart ;
this . options . extensions . startInline . forEach ( function ( getStartIndex ) {
tempStart = getStartIndex . call ( { lexer : this } , tempSrc ) ;
if ( typeof tempStart === 'number' && tempStart >= 0 ) { startIndex = Math . min ( startIndex , tempStart ) ; }
} ) ;
if ( startIndex < Infinity && startIndex >= 0 ) {
cutSrc = src . substring ( 0 , startIndex + 1 ) ;
}
}
if ( token = this . tokenizer . inlineText ( cutSrc , smartypants ) ) {
src = src . substring ( token . raw . length ) ;
if ( token . raw . slice ( - 1 ) !== '_' ) { // Track prevChar before string of ____ started
prevChar = token . raw . slice ( - 1 ) ;
}
keepPrevChar = true ;
lastToken = tokens [ tokens . length - 1 ] ;
if ( lastToken && lastToken . type === 'text' ) {
lastToken . raw += token . raw ;
lastToken . text += token . text ;
} else {
tokens . push ( token ) ;
}
continue ;
}
if ( src ) {
const errMsg = 'Infinite loop on byte: ' + src . charCodeAt ( 0 ) ;
if ( this . options . silent ) {
console . error ( errMsg ) ;
break ;
} else {
throw new Error ( errMsg ) ;
}
}
}
return tokens ;
}
}
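/*
 * Illustrative usage sketch (example input assumed): the static helper runs the block
 * pass, flushes the inline queue, and returns the token tree that Parser consumes.
 *
 *   Lexer.lex('# Hi *there*', getDefaults())
 *   // => [{ type: 'heading', depth: 1, text: 'Hi *there*', tokens: [...] }]
 *   //    with a `links` map attached to the returned array
 */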
/**
 * Renderer
 */
class Renderer {
constructor ( options ) {
this . options = options || defaults ;
}
code ( code , infostring , escaped ) {
const lang = ( infostring || '' ) . match ( /\S*/ ) [ 0 ] ;
if ( this . options . highlight ) {
const out = this . options . highlight ( code , lang ) ;
if ( out != null && out !== code ) {
escaped = true ;
code = out ;
}
}
code = code . replace ( /\n$/ , '' ) + '\n' ;
if ( ! lang ) {
return '<pre><code>'
+ ( escaped ? code : escape ( code , true ) )
+ '</code></pre>\n' ;
}
return '<pre><code class="'
+ this . options . langPrefix
+ escape ( lang )
+ '">'
+ ( escaped ? code : escape ( code , true ) )
+ '</code></pre>\n' ;
}
/**
 * @param {string} quote
 */
blockquote(quote) {
  return `<blockquote>\n${quote}</blockquote>\n`;
}
html ( html ) {
return html ;
}
/**
 * @param {string} text
 * @param {string} level
 * @param {string} raw
 * @param {any} slugger
 */
heading(text, level, raw, slugger) {
  if (this.options.headerIds) {
    const id = this.options.headerPrefix + slugger.slug(raw);
    return `<h${level} id="${id}">${text}</h${level}>\n`;
  }
  // ignore IDs
  return `<h${level}>${text}</h${level}>\n`;
}
hr ( ) {
return this . options . xhtml ? '<hr/>\n' : '<hr>\n' ;
}
list ( body , ordered , start ) {
const type = ordered ? 'ol' : 'ul' ,
startatt = ( ordered && start !== 1 ) ? ( ' start="' + start + '"' ) : '' ;
return '<' + type + startatt + '>\n' + body + '</' + type + '>\n' ;
}
/**
 * @param {string} text
 */
listitem(text) {
  return `<li>${text}</li>\n`;
}
checkbox ( checked ) {
return '<input '
+ ( checked ? 'checked="" ' : '' )
+ 'disabled="" type="checkbox"'
+ ( this . options . xhtml ? ' /' : '' )
+ '> ' ;
}
/**
 * @param {string} text
 */
paragraph(text) {
  return `<p>${text}</p>\n`;
}
/**
 * @param {string} header
 * @param {string} body
 */
table(header, body) {
  if (body) body = `<tbody>${body}</tbody>`;
return '<table>\n'
+ '<thead>\n'
+ header
+ '</thead>\n'
+ body
+ '</table>\n' ;
}
/**
 * @param {string} content
 */
tablerow(content) {
  return `<tr>\n${content}</tr>\n`;
}
tablecell(content, flags) {
  const type = flags.header ? 'th' : 'td';
  const tag = flags.align
    ? `<${type} align="${flags.align}">`
    : `<${type}>`;
  return tag + content + `</${type}>\n`;
}
/**
 * span level renderer
 * @param {string} text
 */
strong(text) {
  return `<strong>${text}</strong>`;
}
/**
 * @param {string} text
 */
em(text) {
  return `<em>${text}</em>`;
}
/**
 * @param {string} text
 */
codespan(text) {
  return `<code>${text}</code>`;
}
br ( ) {
return this . options . xhtml ? '<br/>' : '<br>' ;
}
/**
 * @param {string} text
 */
del(text) {
  return `<del>${text}</del>`;
}
/**
 * @param {string} href
 * @param {string} title
 * @param {string} text
 */
link ( href , title , text ) {
href = cleanUrl ( this . options . sanitize , this . options . baseUrl , href ) ;
if ( href === null ) {
return text ;
}
let out = '<a href="' + href + '"' ;
if ( title ) {
out += ' title="' + title + '"' ;
}
out += '>' + text + '</a>' ;
return out ;
}
/**
 * @param {string} href
 * @param {string} title
 * @param {string} text
 */
image ( href , title , text ) {
href = cleanUrl ( this . options . sanitize , this . options . baseUrl , href ) ;
if ( href === null ) {
return text ;
}
let out = `<img src="${href}" alt="${text}"`;
if (title) {
  out += ` title="${title}"`;
}
out += this . options . xhtml ? '/>' : '>' ;
return out ;
}
text ( text ) {
return text ;
}
}
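/*
 * Illustrative sketch (example values assumed): with the default options, headings get
 * an id derived from the raw text via Slugger.
 *
 *   new Renderer().heading('Hello <em>world</em>', 2, 'Hello world', new Slugger())
 *   // => '<h2 id="hello-world">Hello <em>world</em></h2>\n'
 */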
/**
 * TextRenderer
 * returns only the textual part of the token
 */
class TextRenderer {
// no need for block level renderers
strong ( text ) {
return text ;
}
em ( text ) {
return text ;
}
codespan ( text ) {
return text ;
}
del ( text ) {
return text ;
}
html ( text ) {
return text ;
}
text ( text ) {
return text ;
}
link ( href , title , text ) {
return '' + text ;
}
image ( href , title , text ) {
return '' + text ;
}
br ( ) {
return '' ;
}
}
/**
 * Slugger generates header id
 */
class Slugger {
constructor ( ) {
this . seen = { } ;
}
/**
 * @param {string} value
 */
serialize ( value ) {
return value
. toLowerCase ( )
. trim ( )
// remove html tags
. replace ( /<[!\/a-z].*?>/ig , '' )
// remove unwanted chars
. replace ( /[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g , '' )
. replace ( /\s/g , '-' ) ;
}
/**
 * Finds the next safe (unique) slug to use
 * @param {string} originalSlug
 * @param {boolean} isDryRun
 */
getNextSafeSlug ( originalSlug , isDryRun ) {
let slug = originalSlug ;
let occurenceAccumulator = 0 ;
if ( this . seen . hasOwnProperty ( slug ) ) {
occurenceAccumulator = this . seen [ originalSlug ] ;
do {
occurenceAccumulator ++ ;
slug = originalSlug + '-' + occurenceAccumulator ;
} while ( this . seen . hasOwnProperty ( slug ) ) ;
}
if ( ! isDryRun ) {
this . seen [ originalSlug ] = occurenceAccumulator ;
this . seen [ slug ] = 0 ;
}
return slug ;
}
/ * *
* Convert string to unique id
* @ param { object } [ options ]
* @ param { boolean } [ options . dryrun ] Generates the next unique slug without
* updating the internal accumulator .
* /
slug ( value , options = { } ) {
const slug = this . serialize ( value ) ;
return this . getNextSafeSlug ( slug , options . dryrun ) ;
}
}
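// Illustrative usage sketch (not part of the library): repeated values get a
// numeric suffix so header ids stay unique within one Slugger instance.
//
//   const slugger = new Slugger();
//   slugger.slug('Hello World');                   // 'hello-world'
//   slugger.slug('Hello World');                   // 'hello-world-1'
//   slugger.slug('Hello World', { dryrun: true }); // 'hello-world-2' (not recorded)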
/**
 * Parsing & Compiling
 */
class Parser {
  constructor(options) {
    this.options = options || defaults;
    this.options.renderer = this.options.renderer || new Renderer();
    this.renderer = this.options.renderer;
    this.renderer.options = this.options;
    this.textRenderer = new TextRenderer();
    this.slugger = new Slugger();
  }
  /**
   * Static Parse Method
   */
  static parse(tokens, options) {
    const parser = new Parser(options);
    return parser.parse(tokens);
  }
  /**
   * Static Parse Inline Method
   */
  static parseInline(tokens, options) {
    const parser = new Parser(options);
    return parser.parseInline(tokens);
  }
  /**
   * Parse Loop
   */
  parse(tokens, top = true) {
    let out = '',
      i,
      j,
      k,
      l2,
      l3,
      row,
      cell,
      header,
      body,
      token,
      ordered,
      start,
      loose,
      itemBody,
      item,
      checked,
      task,
      checkbox,
      ret;
    const l = tokens.length;
    for (i = 0; i < l; i++) {
      token = tokens[i];
      // Run any renderer extensions
      if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
        ret = this.options.extensions.renderers[token.type].call({ parser: this }, token);
        if (ret !== false || !['space', 'hr', 'heading', 'code', 'table', 'blockquote', 'list', 'html', 'paragraph', 'text'].includes(token.type)) {
          out += ret || '';
          continue;
        }
      }
      switch (token.type) {
        case 'space': {
          continue;
        }
        case 'hr': {
          out += this.renderer.hr();
          continue;
        }
        case 'heading': {
          out += this.renderer.heading(
            this.parseInline(token.tokens),
            token.depth,
            unescape(this.parseInline(token.tokens, this.textRenderer)),
            this.slugger);
          continue;
        }
        case 'code': {
          out += this.renderer.code(token.text,
            token.lang,
            token.escaped);
          continue;
        }
        case 'table': {
          header = '';
          // header
          cell = '';
          l2 = token.header.length;
          for (j = 0; j < l2; j++) {
            cell += this.renderer.tablecell(
              this.parseInline(token.header[j].tokens),
              { header: true, align: token.align[j] }
            );
          }
          header += this.renderer.tablerow(cell);
          body = '';
          l2 = token.rows.length;
          for (j = 0; j < l2; j++) {
            row = token.rows[j];
            cell = '';
            l3 = row.length;
            for (k = 0; k < l3; k++) {
              cell += this.renderer.tablecell(
                this.parseInline(row[k].tokens),
                { header: false, align: token.align[k] }
              );
            }
            body += this.renderer.tablerow(cell);
          }
          out += this.renderer.table(header, body);
          continue;
        }
        case 'blockquote': {
          body = this.parse(token.tokens);
          out += this.renderer.blockquote(body);
          continue;
        }
        case 'list': {
          ordered = token.ordered;
          start = token.start;
          loose = token.loose;
          l2 = token.items.length;
          body = '';
          for (j = 0; j < l2; j++) {
            item = token.items[j];
            checked = item.checked;
            task = item.task;
            itemBody = '';
            if (item.task) {
              checkbox = this.renderer.checkbox(checked);
              if (loose) {
                if (item.tokens.length > 0 && item.tokens[0].type === 'paragraph') {
                  item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
                  if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
                    item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
                  }
                } else {
                  item.tokens.unshift({
                    type: 'text',
                    text: checkbox
                  });
                }
              } else {
                itemBody += checkbox;
              }
            }
            itemBody += this.parse(item.tokens, loose);
            body += this.renderer.listitem(itemBody, task, checked);
          }
          out += this.renderer.list(body, ordered, start);
          continue;
        }
        case 'html': {
          // TODO parse inline content if parameter markdown=1
          out += this.renderer.html(token.text);
          continue;
        }
        case 'paragraph': {
          out += this.renderer.paragraph(this.parseInline(token.tokens));
          continue;
        }
        case 'text': {
          body = token.tokens ? this.parseInline(token.tokens) : token.text;
          while (i + 1 < l && tokens[i + 1].type === 'text') {
            token = tokens[++i];
            body += '\n' + (token.tokens ? this.parseInline(token.tokens) : token.text);
          }
          out += top ? this.renderer.paragraph(body) : body;
          continue;
        }
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            console.error(errMsg);
            return;
          } else {
            throw new Error(errMsg);
          }
        }
      }
    }
    return out;
  }
  /**
   * Parse Inline Tokens
   */
  parseInline(tokens, renderer) {
    renderer = renderer || this.renderer;
    let out = '',
      i,
      token,
      ret;
    const l = tokens.length;
    for (i = 0; i < l; i++) {
      token = tokens[i];
      // Run any renderer extensions
      if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
        ret = this.options.extensions.renderers[token.type].call({ parser: this }, token);
        if (ret !== false || !['escape', 'html', 'link', 'image', 'strong', 'em', 'codespan', 'br', 'del', 'text'].includes(token.type)) {
          out += ret || '';
          continue;
        }
      }
      switch (token.type) {
        case 'escape': {
          out += renderer.text(token.text);
          break;
        }
        case 'html': {
          out += renderer.html(token.text);
          break;
        }
        case 'link': {
          out += renderer.link(token.href, token.title, this.parseInline(token.tokens, renderer));
          break;
        }
        case 'image': {
          out += renderer.image(token.href, token.title, token.text);
          break;
        }
        case 'strong': {
          out += renderer.strong(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'em': {
          out += renderer.em(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'codespan': {
          out += renderer.codespan(token.text);
          break;
        }
        case 'br': {
          out += renderer.br();
          break;
        }
        case 'del': {
          out += renderer.del(this.parseInline(token.tokens, renderer));
          break;
        }
        case 'text': {
          out += renderer.text(token.text);
          break;
        }
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            console.error(errMsg);
            return;
          } else {
            throw new Error(errMsg);
          }
        }
      }
    }
    return out;
  }
}
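// Illustrative usage sketch (not part of the library): the lexer produces a
// token array and the parser renders it to HTML; marked() below simply wires
// the two together. Output shown assumes the default options (headerIds: true).
//
//   const tokens = Lexer.lex('# heading');
//   Parser.parse(tokens); // '<h1 id="heading">heading</h1>\n'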
class Hooks {
  constructor(options) {
    this.options = options || defaults;
  }
  static passThroughHooks = new Set([
    'preprocess',
    'postprocess'
  ]);
  /**
   * Process markdown before marked
   */
  preprocess(markdown) {
    return markdown;
  }
  /**
   * Process HTML after marked is finished
   */
  postprocess(html) {
    return html;
  }
}
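// Illustrative usage sketch (not part of the library): hooks registered through
// marked.use() run before lexing (preprocess) and after rendering (postprocess).
//
//   marked.use({
//     hooks: {
//       preprocess(markdown) {
//         return markdown.trim();
//       },
//       postprocess(html) {
//         return html.replace(/\n$/, '');
//       }
//     }
//   });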
function onError(silent, async, callback) {
  return (e) => {
    e.message += '\nPlease report this to https://github.com/markedjs/marked.';
    if (silent) {
      const msg = '<p>An error occurred:</p><pre>'
        + escape(e.message + '', true)
        + '</pre>';
      if (async) {
        return Promise.resolve(msg);
      }
      if (callback) {
        callback(null, msg);
        return;
      }
      return msg;
    }
    if (async) {
      return Promise.reject(e);
    }
    if (callback) {
      callback(e);
      return;
    }
    throw e;
  };
}
function parseMarkdown(lexer, parser) {
  return (src, opt, callback) => {
    if (typeof opt === 'function') {
      callback = opt;
      opt = null;
    }
    const origOpt = { ...opt };
    opt = { ...marked.defaults, ...origOpt };
    const throwError = onError(opt.silent, opt.async, callback);
    // throw error in case of non string input
    if (typeof src === 'undefined' || src === null) {
      return throwError(new Error('marked(): input parameter is undefined or null'));
    }
    if (typeof src !== 'string') {
      return throwError(new Error('marked(): input parameter is of type '
        + Object.prototype.toString.call(src) + ', string expected'));
    }
    checkSanitizeDeprecation(opt);
    if (opt.hooks) {
      opt.hooks.options = opt;
    }
    if (callback) {
      const highlight = opt.highlight;
      let tokens;
      try {
        if (opt.hooks) {
          src = opt.hooks.preprocess(src);
        }
        tokens = lexer(src, opt);
      } catch (e) {
        return throwError(e);
      }
      const done = function(err) {
        let out;
        if (!err) {
          try {
            if (opt.walkTokens) {
              marked.walkTokens(tokens, opt.walkTokens);
            }
            out = parser(tokens, opt);
            if (opt.hooks) {
              out = opt.hooks.postprocess(out);
            }
          } catch (e) {
            err = e;
          }
        }
        opt.highlight = highlight;
        return err
          ? throwError(err)
          : callback(null, out);
      };
      if (!highlight || highlight.length < 3) {
        return done();
      }
      delete opt.highlight;
      if (!tokens.length) return done();
      let pending = 0;
      marked.walkTokens(tokens, function(token) {
        if (token.type === 'code') {
          pending++;
          setTimeout(() => {
            highlight(token.text, token.lang, function(err, code) {
              if (err) {
                return done(err);
              }
              if (code != null && code !== token.text) {
                token.text = code;
                token.escaped = true;
              }
              pending--;
              if (pending === 0) {
                done();
              }
            });
          }, 0);
        }
      });
      if (pending === 0) {
        done();
      }
      return;
    }
    if (opt.async) {
      return Promise.resolve(opt.hooks ? opt.hooks.preprocess(src) : src)
        .then(src => lexer(src, opt))
        .then(tokens => opt.walkTokens ? Promise.all(marked.walkTokens(tokens, opt.walkTokens)).then(() => tokens) : tokens)
        .then(tokens => parser(tokens, opt))
        .then(html => opt.hooks ? opt.hooks.postprocess(html) : html)
        .catch(throwError);
    }
    try {
      if (opt.hooks) {
        src = opt.hooks.preprocess(src);
      }
      const tokens = lexer(src, opt);
      if (opt.walkTokens) {
        marked.walkTokens(tokens, opt.walkTokens);
      }
      let html = parser(tokens, opt);
      if (opt.hooks) {
        html = opt.hooks.postprocess(html);
      }
      return html;
    } catch (e) {
      return throwError(e);
    }
  };
}
/**
 * Marked
 */
function marked(src, opt, callback) {
  return parseMarkdown(Lexer.lex, Parser.parse)(src, opt, callback);
}
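// Illustrative usage sketch (not part of the library), assuming default options:
//
//   marked('# heading *emphasis*');
//   // '<h1 id="heading-emphasis">heading <em>emphasis</em></h1>\n'
//
//   // With the async option, the same call resolves to a Promise<string>:
//   marked('# heading', { async: true }).then(html => console.log(html));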
/ * *
* Options
* /
marked . options =
marked . setOptions = function ( opt ) {
marked . defaults = { ... marked . defaults , ... opt } ;
changeDefaults ( marked . defaults ) ;
return marked ;
} ;
marked . getDefaults = getDefaults ;
marked . defaults = defaults ;
/**
 * Use Extension
 */
marked.use = function(...args) {
  const extensions = marked.defaults.extensions || { renderers: {}, childTokens: {} };
  args.forEach((pack) => {
    // copy options to new object
    const opts = { ...pack };
    // set async to true if it was set to true before
    opts.async = marked.defaults.async || opts.async || false;
    // ==-- Parse "addon" extensions --== //
    if (pack.extensions) {
      pack.extensions.forEach((ext) => {
        if (!ext.name) {
          throw new Error('extension name required');
        }
        if (ext.renderer) { // Renderer extensions
          const prevRenderer = extensions.renderers[ext.name];
          if (prevRenderer) {
            // Replace extension with func to run new extension but fall back if false
            extensions.renderers[ext.name] = function(...args) {
              let ret = ext.renderer.apply(this, args);
              if (ret === false) {
                ret = prevRenderer.apply(this, args);
              }
              return ret;
            };
          } else {
            extensions.renderers[ext.name] = ext.renderer;
          }
        }
        if (ext.tokenizer) { // Tokenizer Extensions
          if (!ext.level || (ext.level !== 'block' && ext.level !== 'inline')) {
            throw new Error("extension level must be 'block' or 'inline'");
          }
          if (extensions[ext.level]) {
            extensions[ext.level].unshift(ext.tokenizer);
          } else {
            extensions[ext.level] = [ext.tokenizer];
          }
          if (ext.start) { // Function to check for start of token
            if (ext.level === 'block') {
              if (extensions.startBlock) {
                extensions.startBlock.push(ext.start);
              } else {
                extensions.startBlock = [ext.start];
              }
            } else if (ext.level === 'inline') {
              if (extensions.startInline) {
                extensions.startInline.push(ext.start);
              } else {
                extensions.startInline = [ext.start];
              }
            }
          }
        }
        if (ext.childTokens) { // Child tokens to be visited by walkTokens
          extensions.childTokens[ext.name] = ext.childTokens;
        }
      });
      opts.extensions = extensions;
    }
    // ==-- Parse "overwrite" extensions --== //
    if (pack.renderer) {
      const renderer = marked.defaults.renderer || new Renderer();
      for (const prop in pack.renderer) {
        const prevRenderer = renderer[prop];
        // Replace renderer with func to run extension, but fall back if false
        renderer[prop] = (...args) => {
          let ret = pack.renderer[prop].apply(renderer, args);
          if (ret === false) {
            ret = prevRenderer.apply(renderer, args);
          }
          return ret;
        };
      }
      opts.renderer = renderer;
    }
    if (pack.tokenizer) {
      const tokenizer = marked.defaults.tokenizer || new Tokenizer();
      for (const prop in pack.tokenizer) {
        const prevTokenizer = tokenizer[prop];
        // Replace tokenizer with func to run extension, but fall back if false
        tokenizer[prop] = (...args) => {
          let ret = pack.tokenizer[prop].apply(tokenizer, args);
          if (ret === false) {
            ret = prevTokenizer.apply(tokenizer, args);
          }
          return ret;
        };
      }
      opts.tokenizer = tokenizer;
    }
    // ==-- Parse Hooks extensions --== //
    if (pack.hooks) {
      const hooks = marked.defaults.hooks || new Hooks();
      for (const prop in pack.hooks) {
        const prevHook = hooks[prop];
        if (Hooks.passThroughHooks.has(prop)) {
          hooks[prop] = (arg) => {
            if (marked.defaults.async) {
              return Promise.resolve(pack.hooks[prop].call(hooks, arg)).then(ret => {
                return prevHook.call(hooks, ret);
              });
            }
            const ret = pack.hooks[prop].call(hooks, arg);
            return prevHook.call(hooks, ret);
          };
        } else {
          hooks[prop] = (...args) => {
            let ret = pack.hooks[prop].apply(hooks, args);
            if (ret === false) {
              ret = prevHook.apply(hooks, args);
            }
            return ret;
          };
        }
      }
      opts.hooks = hooks;
    }
    // ==-- Parse WalkTokens extensions --== //
    if (pack.walkTokens) {
      const walkTokens = marked.defaults.walkTokens;
      opts.walkTokens = function(token) {
        let values = [];
        values.push(pack.walkTokens.call(this, token));
        if (walkTokens) {
          values = values.concat(walkTokens.call(this, token));
        }
        return values;
      };
    }
    marked.setOptions(opts);
  });
};
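// Illustrative usage sketch (not part of the library): overriding a renderer
// method through marked.use(); returning false from the override falls back to
// the previous renderer.
//
//   marked.use({
//     renderer: {
//       codespan(text) {
//         return `<kbd>${text}</kbd>`;
//       }
//     }
//   });
//   marked('`ctrl`'); // '<p><kbd>ctrl</kbd></p>\n'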
/**
 * Run callback for every token
 */
marked.walkTokens = function(tokens, callback) {
  let values = [];
  for (const token of tokens) {
    values = values.concat(callback.call(marked, token));
    switch (token.type) {
      case 'table': {
        for (const cell of token.header) {
          values = values.concat(marked.walkTokens(cell.tokens, callback));
        }
        for (const row of token.rows) {
          for (const cell of row) {
            values = values.concat(marked.walkTokens(cell.tokens, callback));
          }
        }
        break;
      }
      case 'list': {
        values = values.concat(marked.walkTokens(token.items, callback));
        break;
      }
      default: {
        if (marked.defaults.extensions && marked.defaults.extensions.childTokens && marked.defaults.extensions.childTokens[token.type]) { // Walk any extensions
          marked.defaults.extensions.childTokens[token.type].forEach(function(childTokens) {
            values = values.concat(marked.walkTokens(token[childTokens], callback));
          });
        } else if (token.tokens) {
          values = values.concat(marked.walkTokens(token.tokens, callback));
        }
      }
    }
  }
  return values;
};
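// Illustrative usage sketch (not part of the library): a walkTokens callback
// registered through marked.use() visits every token before rendering, e.g. to
// bump heading depths.
//
//   marked.use({
//     walkTokens(token) {
//       if (token.type === 'heading') {
//         token.depth += 1;
//       }
//     }
//   });
//   marked('# title'); // rendered as an <h2> instead of an <h1>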
/**
 * Parse Inline
 * @param {string} src
 */
marked.parseInline = parseMarkdown(Lexer.lexInline, Parser.parseInline);
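// Illustrative usage sketch (not part of the library): parseInline renders only
// span-level markdown, without wrapping the result in a block element.
//
//   marked.parseInline('**strong** text'); // '<strong>strong</strong> text'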
/**
 * Expose
 */
marked.Parser = Parser;
marked.parser = Parser.parse;
marked.Renderer = Renderer;
marked.TextRenderer = TextRenderer;
marked.Lexer = Lexer;
marked.lexer = Lexer.lex;
marked.Tokenizer = Tokenizer;
marked.Slugger = Slugger;
marked.Hooks = Hooks;
marked.parse = marked;
const options = marked.options;
const setOptions = marked.setOptions;
const use = marked.use;
const walkTokens = marked.walkTokens;
const parseInline = marked.parseInline;
const parse = marked;
const parser = Parser.parse;
const lexer = Lexer.lex;

export { Hooks, Lexer, Parser, Renderer, Slugger, TextRenderer, Tokenizer, defaults, getDefaults, lexer, marked, options, parse, parseInline, parser, setOptions, use, walkTokens };