mirror of
https://github.com/discourse/discourse.git
synced 2024-12-25 13:23:42 +08:00
9a1695ccc1
With Embroider, we can rely on async `import()` to do the splitting for us. This commit extracts from `pretty-text` all the parts that are meant to be loaded async into a new `discourse-markdown-it` package that is also a V2 addon (meaning that all files are presumed unused until they are imported, aka "static"). Mostly I tried to keep the very discourse specific stuff (accessing site settings and loading plugin features) inside discourse proper, while the new package aims to have some resemblance of a general purpose library, a MarkdownIt++ if you will. It is far from perfect because of how all the "options" stuff works but I think it's a good start for more refactorings (clearing up the interfaces) to happen later. With this, pretty-text and app/lib/text are mostly a kitchen sink of loosely related text processing utilities. After the refactor, a lot more code related to setting up the engine is now loaded lazily, which should be a pretty nice win. I also noticed that we are currently pulling in the `xss` library at initial load to power the "sanitize" stuff, but I suspect with a similar refactoring effort those usages can be removed too. (See also #23790). This PR does not attempt to fix the sanitize issue, but I think it sets things up on the right trajectory for that to happen later. Co-authored-by: David Taylor <david@taylorhq.com>
71 lines
1.9 KiB
JavaScript
71 lines
1.9 KiB
JavaScript
// since the markdown.it interface is a bit on the verbose side
// we can keep some general patterns here

// creates a rule suitable for inline parsing and replacement
//
// example:
// const rule = inlineRegexRule(md, {
//   start: '#',
//   matcher: /^#([\w-:]{1,101})/i,
//   emitter: emitter
// });

// based off https://github.com/markdown-it/markdown-it-emoji/blob/master/dist/markdown-it-emoji.js
//
// Walks every "inline" block token in `state.tokens` and offers each plain
// `text` child token to `callback(content, state)`. When the callback returns
// an array of replacement tokens, they are spliced in place of that text
// token via `state.md.utils.arrayReplaceAt`; a falsy return leaves the token
// untouched. With `skipAllLinks` set, text inside any link (markdown
// link_open/link_close pairs or raw `<a>…</a>` html_inline tokens) is
// skipped; otherwise only text inside autolinks (`token.info === "auto"`)
// is skipped.
export function textReplace(state, callback, skipAllLinks) {
  const blockTokens = state.tokens;
  let linkLevel = 0;

  for (let blockIndex = 0; blockIndex < blockTokens.length; blockIndex++) {
    if (blockTokens[blockIndex].type !== "inline") {
      continue;
    }

    let inlineTokens = blockTokens[blockIndex].children;

    // We scan from the end, to keep position when new tags added.
    // Use reversed logic in links start/end match.
    for (let pos = inlineTokens.length - 1; pos >= 0; pos--) {
      const current = inlineTokens[pos];

      if (skipAllLinks) {
        if (current.type === "link_open" || current.type === "link_close") {
          // nesting is +1 for open, -1 for close; subtracting inverts the
          // bookkeeping to match the reversed scan direction.
          linkLevel -= current.nesting;
        } else if (current.type === "html_inline") {
          const prefix = current.content.slice(0, 2).toLowerCase();

          if (prefix === "<a") {
            if (current.content.match(/^<a(\s.*)?>/i)) {
              linkLevel++;
            }
          } else if (current.content.slice(0, 4).toLowerCase() === "</a>") {
            linkLevel--;
          }
        }
      } else if (
        (current.type === "link_open" || current.type === "link_close") &&
        current.info === "auto"
      ) {
        // only autolinks are protected when skipAllLinks is off
        linkLevel -= current.nesting;
      }

      if (current.type === "text" && linkLevel === 0) {
        const replacement = callback(current.content, state);

        if (replacement) {
          // replace current node
          inlineTokens = state.md.utils.arrayReplaceAt(
            inlineTokens,
            pos,
            replacement
          );
          blockTokens[blockIndex].children = inlineTokens;
        }
      }
    }
  }
}