mirror of
https://github.com/discourse/discourse.git
synced 2024-11-25 09:42:07 +08:00
Feature: Change markdown engine to markdown it
This commit removes the old evilstreak markdownjs engine. - Adds specs to WhiteLister and changes it to stop using globals (Fixes large memory leak) - Fixes edge cases around bbcode handling - Removes mdtest which is no longer valid (to be replaced with CommonMark) - Updates MiniRacer to correct minor unmanaged memory leak - Fixes plugin specs
This commit is contained in:
parent
9e03fae26c
commit
d0c5205a52
|
@ -156,7 +156,7 @@ GEM
|
|||
mime-types (2.99.3)
|
||||
mini_mime (0.1.3)
|
||||
mini_portile2 (2.2.0)
|
||||
mini_racer (0.1.9)
|
||||
mini_racer (0.1.10)
|
||||
libv8 (~> 5.3)
|
||||
minitest (5.10.2)
|
||||
mocha (1.2.1)
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
//= require markdown-it.js
|
||||
//= require ./pretty-text/engines/markdown-it/helpers
|
||||
//= require ./pretty-text/engines/markdown-it/mentions
|
||||
//= require ./pretty-text/engines/markdown-it/quotes
|
||||
//= require ./pretty-text/engines/markdown-it/emoji
|
||||
//= require ./pretty-text/engines/markdown-it/onebox
|
||||
//= require ./pretty-text/engines/markdown-it/bbcode-block
|
||||
//= require ./pretty-text/engines/markdown-it/bbcode-inline
|
||||
//= require ./pretty-text/engines/markdown-it/code
|
||||
//= require ./pretty-text/engines/markdown-it/category-hashtag
|
||||
//= require ./pretty-text/engines/markdown-it/censored
|
||||
//= require ./pretty-text/engines/markdown-it/table
|
||||
//= require ./pretty-text/engines/markdown-it/paragraph
|
||||
//= require ./pretty-text/engines/markdown-it/newline
|
||||
//= require ./pretty-text/engines/markdown-it/html_img
|
||||
//= require ./pretty-text/engines/discourse-markdown/helpers
|
||||
//= require ./pretty-text/engines/discourse-markdown/mentions
|
||||
//= require ./pretty-text/engines/discourse-markdown/quotes
|
||||
//= require ./pretty-text/engines/discourse-markdown/emoji
|
||||
//= require ./pretty-text/engines/discourse-markdown/onebox
|
||||
//= require ./pretty-text/engines/discourse-markdown/bbcode-block
|
||||
//= require ./pretty-text/engines/discourse-markdown/bbcode-inline
|
||||
//= require ./pretty-text/engines/discourse-markdown/code
|
||||
//= require ./pretty-text/engines/discourse-markdown/category-hashtag
|
||||
//= require ./pretty-text/engines/discourse-markdown/censored
|
||||
//= require ./pretty-text/engines/discourse-markdown/table
|
||||
//= require ./pretty-text/engines/discourse-markdown/paragraph
|
||||
//= require ./pretty-text/engines/discourse-markdown/newline
|
||||
//= require ./pretty-text/engines/discourse-markdown/html_img
|
||||
|
|
|
@ -3,11 +3,8 @@
|
|||
//= require ./pretty-text/censored-words
|
||||
//= require ./pretty-text/emoji/data
|
||||
//= require ./pretty-text/emoji
|
||||
//= require ./pretty-text/engines/discourse-markdown
|
||||
//= require ./pretty-text/engines/discourse-markdown-it
|
||||
//= require_tree ./pretty-text/engines/discourse-markdown
|
||||
//= require xss.min
|
||||
//= require better_markdown.js
|
||||
//= require ./pretty-text/xss
|
||||
//= require ./pretty-text/white-lister
|
||||
//= require ./pretty-text/sanitizer
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
import { default as WhiteLister, whiteListFeature } from 'pretty-text/white-lister';
|
||||
import { default as WhiteLister } from 'pretty-text/white-lister';
|
||||
import { sanitize } from 'pretty-text/sanitizer';
|
||||
import guid from 'pretty-text/guid';
|
||||
|
||||
|
@ -10,10 +10,10 @@ function deprecate(feature, name){
|
|||
};
|
||||
}
|
||||
|
||||
function createHelper(featureName, opts, optionCallbacks, pluginCallbacks, getOptions) {
|
||||
function createHelper(featureName, opts, optionCallbacks, pluginCallbacks, getOptions, whiteListed) {
|
||||
let helper = {};
|
||||
helper.markdownIt = true;
|
||||
helper.whiteList = info => whiteListFeature(featureName, info);
|
||||
helper.whiteList = info => whiteListed.push([featureName, info]);
|
||||
helper.registerInline = deprecate(featureName,'registerInline');
|
||||
helper.replaceBlock = deprecate(featureName,'replaceBlock');
|
||||
helper.addPreProcessor = deprecate(featureName,'addPreProcessor');
|
||||
|
@ -151,7 +151,7 @@ export function setup(opts, siteSettings, state) {
|
|||
}
|
||||
|
||||
// we got to require this late cause bundle is not loaded in pretty-text
|
||||
Helpers = Helpers || requirejs('pretty-text/engines/markdown-it/helpers');
|
||||
Helpers = Helpers || requirejs('pretty-text/engines/discourse-markdown/helpers');
|
||||
|
||||
opts.markdownIt = true;
|
||||
|
||||
|
@ -165,6 +165,7 @@ export function setup(opts, siteSettings, state) {
|
|||
|
||||
const check = /discourse-markdown\/|markdown-it\//;
|
||||
let features = [];
|
||||
let whiteListed = [];
|
||||
|
||||
Object.keys(require._eak_seen).forEach(entry => {
|
||||
if (check.test(entry)) {
|
||||
|
@ -173,7 +174,7 @@ export function setup(opts, siteSettings, state) {
|
|||
|
||||
const featureName = entry.split('/').reverse()[0];
|
||||
features.push(featureName);
|
||||
module.setup(createHelper(featureName, opts, optionCallbacks, pluginCallbacks, getOptions));
|
||||
module.setup(createHelper(featureName, opts, optionCallbacks, pluginCallbacks, getOptions, whiteListed));
|
||||
}
|
||||
}
|
||||
});
|
||||
|
@ -227,10 +228,16 @@ export function setup(opts, siteSettings, state) {
|
|||
opts.markdownIt = true;
|
||||
opts.setup = true;
|
||||
|
||||
if (!opts.discourse.sanitizer) {
|
||||
if (!opts.discourse.sanitizer || !opts.sanitizer) {
|
||||
const whiteLister = new WhiteLister(opts.discourse);
|
||||
|
||||
whiteListed.forEach(([feature, info]) => {
|
||||
whiteLister.whiteListFeature(feature, info);
|
||||
});
|
||||
|
||||
opts.sanitizer = opts.discourse.sanitizer = (!!opts.discourse.sanitize) ? a=>sanitize(a, whiteLister) : a=>a;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
export function cook(raw, opts) {
|
||||
|
|
|
@ -1,597 +0,0 @@
|
|||
import guid from 'pretty-text/guid';
|
||||
import { default as WhiteLister, whiteListFeature } from 'pretty-text/white-lister';
|
||||
import { escape } from 'pretty-text/sanitizer';
|
||||
|
||||
var parser = window.BetterMarkdown,
|
||||
MD = parser.Markdown,
|
||||
DialectHelpers = parser.DialectHelpers,
|
||||
hoisted;
|
||||
|
||||
let currentOpts;
|
||||
|
||||
const emitters = [];
|
||||
const preProcessors = [];
|
||||
const parseNodes = [];
|
||||
|
||||
function findEndPos(text, start, stop, args, offset) {
|
||||
let endPos, nextStart;
|
||||
do {
|
||||
endPos = text.indexOf(stop, offset);
|
||||
if (endPos === -1) { return -1; }
|
||||
nextStart = text.indexOf(start, offset);
|
||||
offset = endPos + stop.length;
|
||||
} while (nextStart !== -1 && nextStart < endPos);
|
||||
return endPos;
|
||||
}
|
||||
|
||||
class DialectHelper {
|
||||
constructor() {
|
||||
this._dialect = MD.dialects.Discourse = DialectHelpers.subclassDialect(MD.dialects.Gruber);
|
||||
this._setup = false;
|
||||
}
|
||||
|
||||
escape(str) {
|
||||
return escape(str);
|
||||
}
|
||||
|
||||
getOptions() {
|
||||
return currentOpts;
|
||||
}
|
||||
|
||||
registerInlineFeature(featureName, start, fn) {
|
||||
this._dialect.inline[start] = function() {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
return fn.apply(this, arguments);
|
||||
};
|
||||
}
|
||||
|
||||
addPreProcessorFeature(featureName, fn) {
|
||||
preProcessors.push(raw => {
|
||||
if (!currentOpts.features[featureName]) { return raw; }
|
||||
return fn(raw, hoister);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
The simplest kind of replacement possible. Replace a stirng token with JsonML.
|
||||
|
||||
For example to replace all occurrances of :) with a smile image:
|
||||
|
||||
```javascript
|
||||
helper.inlineReplace(':)', text => ['img', {src: '/images/smile.png'}]);
|
||||
```
|
||||
**/
|
||||
inlineReplaceFeature(featureName, token, emitter) {
|
||||
this.registerInline(token, (text, match, prev) => {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
return [token.length, emitter.call(this, token, match, prev)];
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
After the parser has been executed, change the contents of a HTML tag.
|
||||
|
||||
Let's say you want to replace the contents of all code tags to prepend
|
||||
"EVIL TROUT HACKED YOUR CODE!":
|
||||
|
||||
```javascript
|
||||
helper.postProcessTag('code', contents => `EVIL TROUT HACKED YOUR CODE!\n\n${contents}`);
|
||||
```
|
||||
**/
|
||||
postProcessTagFeature(featureName, tag, emitter) {
|
||||
this.onParseNode(event => {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
const node = event.node;
|
||||
if (node[0] === tag) {
|
||||
node[node.length-1] = emitter(node[node.length-1]);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
Matches inline using a regular expression. The emitter function is passed
|
||||
the matches from the regular expression.
|
||||
|
||||
For example, this auto links URLs:
|
||||
|
||||
```javascript
|
||||
helper.inlineRegexp({
|
||||
matcher: /((?:https?:(?:\/{1,3}|[a-z0-9%])|www\d{0,3}[.])(?:[^\s()<>]+|\([^\s()<>]+\))+(?:\([^\s()<>]+\)|[^`!()\[\]{};:'".,<>?«»“”‘’\s]))/gm,
|
||||
spaceBoundary: true,
|
||||
start: 'http',
|
||||
|
||||
emitter(matches) {
|
||||
const url = matches[1];
|
||||
return ['a', {href: url}, url];
|
||||
}
|
||||
});
|
||||
```
|
||||
**/
|
||||
inlineRegexpFeature(featureName, args) {
|
||||
this.registerInline(args.start, function(text, match, prev) {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
if (invalidBoundary(args, prev)) { return; }
|
||||
|
||||
args.matcher.lastIndex = 0;
|
||||
const m = args.matcher.exec(text);
|
||||
if (m) {
|
||||
const result = args.emitter.call(this, m);
|
||||
if (result) {
|
||||
return [m[0].length, result];
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
Handles inline replacements surrounded by tokens.
|
||||
|
||||
For example, to handle markdown style bold. Note we use `concat` on the array because
|
||||
the contents are JsonML too since we didn't pass `rawContents` as true. This supports
|
||||
recursive markup.
|
||||
|
||||
```javascript
|
||||
helper.inlineBetween({
|
||||
between: '**',
|
||||
wordBoundary: true.
|
||||
emitter(contents) {
|
||||
return ['strong'].concat(contents);
|
||||
}
|
||||
});
|
||||
```
|
||||
**/
|
||||
inlineBetweenFeature(featureName, args) {
|
||||
const start = args.start || args.between;
|
||||
const stop = args.stop || args.between;
|
||||
const startLength = start.length;
|
||||
|
||||
this.registerInline(start, function(text, match, prev) {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
if (invalidBoundary(args, prev)) { return; }
|
||||
|
||||
const endPos = findEndPos(text, start, stop, args, startLength);
|
||||
if (endPos === -1) { return; }
|
||||
var between = text.slice(startLength, endPos);
|
||||
|
||||
// If rawcontents is set, don't process inline
|
||||
if (!args.rawContents) {
|
||||
between = this.processInline(between);
|
||||
}
|
||||
|
||||
var contents = args.emitter.call(this, between);
|
||||
if (contents) {
|
||||
return [endPos+stop.length, contents];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
Replaces a block of text between a start and stop. As opposed to inline, these
|
||||
might span multiple lines.
|
||||
|
||||
Here's an example that takes the content between `[code]` ... `[/code]` and
|
||||
puts them inside a `pre` tag:
|
||||
|
||||
```javascript
|
||||
helper.replaceBlock({
|
||||
start: /(\[code\])([\s\S]*)/igm,
|
||||
stop: '[/code]',
|
||||
rawContents: true,
|
||||
|
||||
emitter(blockContents) {
|
||||
return ['p', ['pre'].concat(blockContents)];
|
||||
}
|
||||
});
|
||||
```
|
||||
**/
|
||||
replaceBlockFeature(featureName, args) {
|
||||
function blockFunc(block, next) {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
|
||||
const linebreaks = currentOpts.traditionalMarkdownLinebreaks;
|
||||
if (linebreaks && args.skipIfTradtionalLinebreaks) { return; }
|
||||
|
||||
args.start.lastIndex = 0;
|
||||
const result = [];
|
||||
const match = (args.start).exec(block);
|
||||
if (!match) { return; }
|
||||
|
||||
const lastChance = () => !next.some(blk => blk.match(args.stop));
|
||||
|
||||
// shave off start tag and leading text, if any.
|
||||
const pos = args.start.lastIndex - match[0].length;
|
||||
const leading = block.slice(0, pos);
|
||||
const trailing = match[2] ? match[2].replace(/^\n*/, "") : "";
|
||||
|
||||
// The other leading block should be processed first! eg a code block wrapped around a code block.
|
||||
if (args.withoutLeading && args.withoutLeading.test(leading)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// just give up if there's no stop tag in this or any next block
|
||||
args.stop.lastIndex = block.length - trailing.length;
|
||||
if (!args.stop.exec(block) && lastChance()) { return; }
|
||||
if (leading.length > 0) {
|
||||
var parsedLeading = this.processBlock(MD.mk_block(leading), []);
|
||||
if (parsedLeading && parsedLeading[0]) {
|
||||
result.push(parsedLeading[0]);
|
||||
}
|
||||
}
|
||||
if (trailing.length > 0) {
|
||||
next.unshift(MD.mk_block(trailing, block.trailing,
|
||||
block.lineNumber + countLines(leading) + (match[2] ? match[2].length : 0) - trailing.length));
|
||||
}
|
||||
|
||||
// go through the available blocks to find the matching stop tag.
|
||||
const contentBlocks = [];
|
||||
let nesting = 0;
|
||||
let actualEndPos = -1;
|
||||
let currentBlock;
|
||||
|
||||
blockloop:
|
||||
while (currentBlock = next.shift()) {
|
||||
|
||||
// collect all the start and stop tags in the current block
|
||||
args.start.lastIndex = 0;
|
||||
const startPos = [];
|
||||
let m;
|
||||
while (m = (args.start).exec(currentBlock)) {
|
||||
startPos.push(args.start.lastIndex - m[0].length);
|
||||
args.start.lastIndex = args.start.lastIndex - (m[2] ? m[2].length : 0);
|
||||
}
|
||||
args.stop.lastIndex = 0;
|
||||
const endPos = [];
|
||||
while (m = (args.stop).exec(currentBlock)) {
|
||||
endPos.push(args.stop.lastIndex - m[0].length);
|
||||
}
|
||||
|
||||
// go through the available end tags:
|
||||
let ep = 0;
|
||||
let sp = 0;
|
||||
while (ep < endPos.length) {
|
||||
if (sp < startPos.length && startPos[sp] < endPos[ep]) {
|
||||
// there's an end tag, but there's also another start tag first. we need to go deeper.
|
||||
sp++; nesting++;
|
||||
} else if (nesting > 0) {
|
||||
// found an end tag, but we must go up a level first.
|
||||
ep++; nesting--;
|
||||
} else {
|
||||
// found an end tag and we're at the top: done! -- or: start tag and end tag are
|
||||
// identical, (i.e. startPos[sp] == endPos[ep]), so we don't do nesting at all.
|
||||
actualEndPos = endPos[ep];
|
||||
break blockloop;
|
||||
}
|
||||
}
|
||||
|
||||
if (lastChance()) {
|
||||
// when lastChance() becomes true the first time, currentBlock contains the last
|
||||
// end tag available in the input blocks but it's not on the right nesting level
|
||||
// or we would have terminated the loop already. the only thing we can do is to
|
||||
// treat the last available end tag as tho it were matched with our start tag
|
||||
// and let the emitter figure out how to render the garbage inside.
|
||||
actualEndPos = endPos[endPos.length - 1];
|
||||
break;
|
||||
}
|
||||
|
||||
// any left-over start tags still increase the nesting level
|
||||
nesting += startPos.length - sp;
|
||||
contentBlocks.push(currentBlock);
|
||||
}
|
||||
|
||||
const stopLen = currentBlock.match(args.stop)[0].length;
|
||||
const before = currentBlock.slice(0, actualEndPos).replace(/\n*$/, "");
|
||||
const after = currentBlock.slice(actualEndPos + stopLen).replace(/^\n*/, "");
|
||||
if (before.length > 0) contentBlocks.push(MD.mk_block(before, "", currentBlock.lineNumber));
|
||||
if (after.length > 0) next.unshift(MD.mk_block(after, currentBlock.trailing, currentBlock.lineNumber + countLines(before)));
|
||||
|
||||
const emitterResult = args.emitter.call(this, contentBlocks, match);
|
||||
if (emitterResult) { result.push(emitterResult); }
|
||||
return result;
|
||||
};
|
||||
|
||||
if (args.priority) {
|
||||
blockFunc.priority = args.priority;
|
||||
}
|
||||
|
||||
this.registerBlock(args.start.toString(), blockFunc);
|
||||
}
|
||||
|
||||
/**
|
||||
After the parser has been executed, post process any text nodes in the HTML document.
|
||||
This is useful if you want to apply a transformation to the text.
|
||||
|
||||
If you are generating HTML from the text, it is preferable to use the replacer
|
||||
functions and do it in the parsing part of the pipeline. This function is best for
|
||||
simple transformations or transformations that have to happen after all earlier
|
||||
processing is done.
|
||||
|
||||
For example, to convert all text to upper case:
|
||||
|
||||
```javascript
|
||||
helper.postProcessText(function (text) {
|
||||
return text.toUpperCase();
|
||||
});
|
||||
```
|
||||
**/
|
||||
postProcessTextFeature(featureName, fn) {
|
||||
emitters.push(function () {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
return fn.apply(this, arguments);
|
||||
});
|
||||
}
|
||||
|
||||
onParseNodeFeature(featureName, fn) {
|
||||
parseNodes.push(function () {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
return fn.apply(this, arguments);
|
||||
});
|
||||
}
|
||||
|
||||
registerBlockFeature(featureName, name, fn) {
|
||||
const blockFunc = function() {
|
||||
if (!currentOpts.features[featureName]) { return; }
|
||||
return fn.apply(this, arguments);
|
||||
};
|
||||
|
||||
blockFunc.priority = fn.priority;
|
||||
this._dialect.block[name] = blockFunc;
|
||||
}
|
||||
|
||||
applyFeature(featureName, module) {
|
||||
helper.registerInline = (code, fn) => helper.registerInlineFeature(featureName, code, fn);
|
||||
helper.replaceBlock = args => helper.replaceBlockFeature(featureName, args);
|
||||
helper.addPreProcessor = fn => helper.addPreProcessorFeature(featureName, fn);
|
||||
helper.inlineReplace = (token, emitter) => helper.inlineReplaceFeature(featureName, token, emitter);
|
||||
helper.postProcessTag = (token, emitter) => helper.postProcessTagFeature(featureName, token, emitter);
|
||||
helper.inlineRegexp = args => helper.inlineRegexpFeature(featureName, args);
|
||||
helper.inlineBetween = args => helper.inlineBetweenFeature(featureName, args);
|
||||
helper.postProcessText = fn => helper.postProcessTextFeature(featureName, fn);
|
||||
helper.onParseNode = fn => helper.onParseNodeFeature(featureName, fn);
|
||||
helper.registerBlock = (name, fn) => helper.registerBlockFeature(featureName, name, fn);
|
||||
|
||||
module.setup(this);
|
||||
}
|
||||
|
||||
setup() {
|
||||
if (this._setup) { return; }
|
||||
this._setup = true;
|
||||
|
||||
Object.keys(require._eak_seen).forEach(entry => {
|
||||
if (entry.indexOf('discourse-markdown') !== -1) {
|
||||
const module = requirejs(entry);
|
||||
if (module && module.setup) {
|
||||
const featureName = entry.split('/').reverse()[0];
|
||||
helper.whiteList = info => whiteListFeature(featureName, info);
|
||||
|
||||
this.applyFeature(featureName, module);
|
||||
helper.whiteList = undefined;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
MD.buildBlockOrder(this._dialect.block);
|
||||
var index = this._dialect.block.__order__.indexOf("code");
|
||||
if (index > -1) {
|
||||
this._dialect.block.__order__.splice(index, 1);
|
||||
this._dialect.block.__order__.unshift("code");
|
||||
}
|
||||
MD.buildInlinePatterns(this._dialect.inline);
|
||||
}
|
||||
};
|
||||
|
||||
const helper = new DialectHelper();
|
||||
|
||||
export function cook(raw, opts) {
|
||||
currentOpts = opts;
|
||||
|
||||
hoisted = {};
|
||||
|
||||
if (!currentOpts.enableExperimentalMarkdownIt) {
|
||||
raw = hoistCodeBlocksAndSpans(raw);
|
||||
preProcessors.forEach(p => raw = p(raw));
|
||||
}
|
||||
|
||||
const whiteLister = new WhiteLister(opts);
|
||||
|
||||
let result;
|
||||
|
||||
if (currentOpts.enableExperimentalMarkdownIt) {
|
||||
result = opts.sanitizer(
|
||||
requirejs('pretty-text/engines/markdown-it/instance').default(opts).render(raw),
|
||||
whiteLister
|
||||
);
|
||||
} else {
|
||||
const tree = parser.toHTMLTree(raw, 'Discourse');
|
||||
result = opts.sanitizer(parser.renderJsonML(parseTree(tree, opts)), whiteLister);
|
||||
}
|
||||
|
||||
// If we hoisted out anything, put it back
|
||||
const keys = Object.keys(hoisted);
|
||||
if (keys.length) {
|
||||
let found = true;
|
||||
|
||||
const unhoist = function(key) {
|
||||
result = result.replace(new RegExp(key, "g"), function() {
|
||||
found = true;
|
||||
return hoisted[key];
|
||||
});
|
||||
};
|
||||
|
||||
while (found) {
|
||||
found = false;
|
||||
keys.forEach(unhoist);
|
||||
}
|
||||
}
|
||||
|
||||
return result.trim();
|
||||
}
|
||||
|
||||
export function setup() {
|
||||
helper.setup();
|
||||
}
|
||||
|
||||
function processTextNodes(node, event, emitter) {
|
||||
if (node.length < 2) { return; }
|
||||
|
||||
if (node[0] === '__RAW') {
|
||||
const hash = guid();
|
||||
hoisted[hash] = node[1];
|
||||
node[1] = hash;
|
||||
return;
|
||||
}
|
||||
|
||||
for (var j=1; j<node.length; j++) {
|
||||
var textContent = node[j];
|
||||
if (typeof textContent === "string") {
|
||||
var result = emitter(textContent, event);
|
||||
if (result) {
|
||||
if (result instanceof Array) {
|
||||
node.splice.apply(node, [j, 1].concat(result));
|
||||
} else {
|
||||
node[j] = result;
|
||||
}
|
||||
} else {
|
||||
node[j] = textContent;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse a JSON ML tree, using registered handlers to adjust it if necessary.
|
||||
function parseTree(tree, options, path, insideCounts) {
|
||||
|
||||
if (tree instanceof Array) {
|
||||
const event = {node: tree, options, path, insideCounts: insideCounts || {}};
|
||||
parseNodes.forEach(fn => fn(event));
|
||||
|
||||
for (var j=0; j<emitters.length; j++) {
|
||||
processTextNodes(tree, event, emitters[j]);
|
||||
}
|
||||
|
||||
path = path || [];
|
||||
insideCounts = insideCounts || {};
|
||||
|
||||
path.push(tree);
|
||||
|
||||
for (var i=1; i<tree.length; i++) {
|
||||
var n = tree[i],
|
||||
tagName = n[0];
|
||||
|
||||
insideCounts[tagName] = (insideCounts[tagName] || 0) + 1;
|
||||
|
||||
if (n && n.length === 2 && n[0] === "p" && /^<!--([\s\S]*)-->$/.exec(n[1])) {
|
||||
// Remove paragraphs around comment-only nodes.
|
||||
tree[i] = n[1];
|
||||
} else {
|
||||
parseTree(n, options, path, insideCounts);
|
||||
}
|
||||
|
||||
insideCounts[tagName] = insideCounts[tagName] - 1;
|
||||
}
|
||||
|
||||
// If raw nodes are in paragraphs, pull them up
|
||||
if (tree.length === 2 && tree[0] === 'p' && tree[1] instanceof Array && tree[1][0] === "__RAW") {
|
||||
var text = tree[1][1];
|
||||
tree[0] = "__RAW";
|
||||
tree[1] = text;
|
||||
}
|
||||
|
||||
path.pop();
|
||||
}
|
||||
return tree;
|
||||
}
|
||||
|
||||
// Returns true if there's an invalid word boundary for a match.
|
||||
function invalidBoundary(args, prev) {
|
||||
if (!(args.wordBoundary || args.spaceBoundary || args.spaceOrTagBoundary)) { return false; }
|
||||
|
||||
var last = prev[prev.length - 1];
|
||||
if (typeof last !== "string") { return false; }
|
||||
|
||||
if (args.wordBoundary && (!last.match(/\W$/))) { return true; }
|
||||
if (args.spaceBoundary && (!last.match(/\s$/))) { return true; }
|
||||
if (args.spaceOrTagBoundary && (!last.match(/(\s|\>|\()$/))) { return true; }
|
||||
}
|
||||
|
||||
function countLines(str) {
|
||||
let index = -1, count = 0;
|
||||
while ((index = str.indexOf("\n", index + 1)) !== -1) { count++; }
|
||||
return count;
|
||||
}
|
||||
|
||||
function hoister(t, target, replacement) {
|
||||
const regexp = new RegExp(target.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&'), "g");
|
||||
if (t.match(regexp)) {
|
||||
const hash = guid();
|
||||
t = t.replace(regexp, hash);
|
||||
hoisted[hash] = replacement;
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
function outdent(t) {
|
||||
return t.replace(/^([ ]{4}|\t)/gm, "");
|
||||
}
|
||||
|
||||
function removeEmptyLines(t) {
|
||||
return t.replace(/^\n+/, "").replace(/\s+$/, "");
|
||||
}
|
||||
|
||||
function hideBackslashEscapedCharacters(t) {
|
||||
return t.replace(/\\\\/g, "\u1E800").replace(/\\`/g, "\u1E8001");
|
||||
}
|
||||
|
||||
function showBackslashEscapedCharacters(t) {
|
||||
return t.replace(/\u1E8001/g, "\\`").replace(/\u1E800/g, "\\\\");
|
||||
}
|
||||
|
||||
function hoistCodeBlocksAndSpans(text) {
|
||||
// replace all "\`" with a single character
|
||||
text = hideBackslashEscapedCharacters(text);
|
||||
|
||||
// /!\ the order is important /!\
|
||||
|
||||
// fenced code blocks (AKA GitHub code blocks)
|
||||
text = text.replace(/(^\n*|\n)```([a-z0-9\-]*)\n([\s\S]*?)\n```/g, function(_, before, language, content) {
|
||||
const hash = guid();
|
||||
hoisted[hash] = escape(showBackslashEscapedCharacters(removeEmptyLines(content)));
|
||||
return before + "```" + language + "\n" + hash + "\n```";
|
||||
});
|
||||
|
||||
// markdown code blocks
|
||||
text = text.replace(/(^\n*|\n\n)((?:(?:[ ]{4}|\t).*\n*)+)/g, function(match, before, content, index) {
|
||||
// make sure we aren't in a list
|
||||
var previousLine = text.slice(0, index).trim().match(/.*$/);
|
||||
if (previousLine && previousLine[0].length) {
|
||||
previousLine = previousLine[0].trim();
|
||||
if (/^(?:\*|\+|-|\d+\.)\s+/.test(previousLine)) {
|
||||
return match;
|
||||
}
|
||||
}
|
||||
// we can safely hoist the code block
|
||||
const hash = guid();
|
||||
hoisted[hash] = escape(outdent(showBackslashEscapedCharacters(removeEmptyLines(content))));
|
||||
return before + " " + hash + "\n";
|
||||
});
|
||||
|
||||
// <pre>...</pre> code blocks
|
||||
text = text.replace(/(\s|^)<pre>([\s\S]*?)<\/pre>/ig, function(_, before, content) {
|
||||
const hash = guid();
|
||||
hoisted[hash] = escape(showBackslashEscapedCharacters(removeEmptyLines(content)));
|
||||
return before + "<pre>" + hash + "</pre>";
|
||||
});
|
||||
|
||||
// code spans (double & single `)
|
||||
["``", "`"].forEach(function(delimiter) {
|
||||
var regexp = new RegExp("(^|[^`])" + delimiter + "([^`\\n]+?)" + delimiter + "([^`]|$)", "g");
|
||||
text = text.replace(regexp, function(_, before, content, after) {
|
||||
const hash = guid();
|
||||
hoisted[hash] = escape(showBackslashEscapedCharacters(content.trim()));
|
||||
return before + delimiter + hash + delimiter + after;
|
||||
});
|
||||
});
|
||||
|
||||
// replace back all weird character with "\`"
|
||||
return showBackslashEscapedCharacters(text);
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
// This addition handles auto linking of text. When included, it will parse out links and create
|
||||
// `<a href>`s for them.
|
||||
|
||||
const urlReplacerArgs = {
|
||||
matcher: /^((?:https?:(?:\/{1,3}|[a-z0-9%])|www\d{0,3}[.])(?:[^\s()<>]+|\([^\s()<>]+\))+(?:\([^\s()<>]+\)|[^`!()\[\]{};:'".,<>?«»“”‘’\s]))/,
|
||||
spaceOrTagBoundary: true,
|
||||
|
||||
emitter(matches) {
|
||||
const url = matches[1];
|
||||
let href = url;
|
||||
|
||||
// Don't autolink a markdown link to something
|
||||
if (url.match(/\]\[\d$/)) { return; }
|
||||
|
||||
// If we improperly caught a markdown link abort
|
||||
if (url.match(/\(http/)) { return; }
|
||||
|
||||
if (url.match(/^www/)) { href = "http://" + url; }
|
||||
return ['a', { href }, url];
|
||||
}
|
||||
};
|
||||
|
||||
export function setup(helper) {
|
||||
if (helper.markdownIt) { return; }
|
||||
helper.inlineRegexp(_.merge({start: 'http'}, urlReplacerArgs));
|
||||
helper.inlineRegexp(_.merge({start: 'www'}, urlReplacerArgs));
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
import { parseBBCodeTag } from 'pretty-text/engines/markdown-it/bbcode-block';
|
||||
import { parseBBCodeTag } from 'pretty-text/engines/discourse-markdown/bbcode-block';
|
||||
|
||||
function tokanizeBBCode(state, silent, ruler) {
|
||||
|
||||
|
@ -57,6 +57,7 @@ function tokanizeBBCode(state, silent, ruler) {
|
|||
|
||||
let token = state.push('text', '' , 0);
|
||||
token.content = state.src.slice(pos, pos+tagInfo.length);
|
||||
token.meta = 'bbcode';
|
||||
|
||||
state.delimiters.push({
|
||||
bbInfo: tagInfo,
|
||||
|
@ -105,10 +106,15 @@ function processBBCode(state, silent) {
|
|||
let tag, className;
|
||||
|
||||
if (typeof tagInfo.rule.wrap === 'function') {
|
||||
if (!tagInfo.rule.wrap(token, tagInfo)) {
|
||||
return false;
|
||||
let content = "";
|
||||
for (let j = startDelim.token+1; j < endDelim.token; j++) {
|
||||
let inner = state.tokens[j];
|
||||
if (inner.type === 'text' && inner.meta !== 'bbcode') {
|
||||
content += inner.content;
|
||||
}
|
||||
}
|
||||
tag = token.tag;
|
||||
tagInfo.rule.wrap(token, state.tokens[endDelim.token], tagInfo, content);
|
||||
continue;
|
||||
} else {
|
||||
let split = tagInfo.rule.wrap.split('.');
|
||||
tag = split[0];
|
||||
|
@ -160,19 +166,35 @@ export function setup(helper) {
|
|||
}
|
||||
});
|
||||
|
||||
const simpleUrlRegex = /^http[s]?:\/\//;
|
||||
ruler.push('url', {
|
||||
tag: 'url',
|
||||
replace: function(state, tagInfo, content) {
|
||||
let token;
|
||||
wrap: function(startToken, endToken, tagInfo, content) {
|
||||
|
||||
token = state.push('link_open', 'a', 1);
|
||||
token.attrs = [['href', content], ['data-bbcode', 'true']];
|
||||
const url = (tagInfo.attrs['_default'] || content).trim();
|
||||
|
||||
token = state.push('text', '', 0);
|
||||
token.content = content;
|
||||
if (simpleUrlRegex.test(url)) {
|
||||
startToken.type = 'link_open';
|
||||
startToken.tag = 'a';
|
||||
startToken.attrs = [['href', url], ['data-bbcode', 'true']];
|
||||
startToken.content = '';
|
||||
startToken.nesting = 1;
|
||||
|
||||
token = state.push('link_close', 'a', -1);
|
||||
return true;
|
||||
endToken.type = 'link_close';
|
||||
endToken.tag = 'a';
|
||||
endToken.content = '';
|
||||
endToken.nesting = -1;
|
||||
} else {
|
||||
// just strip the bbcode tag
|
||||
endToken.content = '';
|
||||
startToken.content = '';
|
||||
|
||||
// edge case, we don't want this detected as a onebox if auto linked
|
||||
// this ensures it is not stripped
|
||||
startToken.type = 'html_inline';
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
});
|
||||
|
||||
|
@ -180,9 +202,10 @@ export function setup(helper) {
|
|||
tag: 'email',
|
||||
replace: function(state, tagInfo, content) {
|
||||
let token;
|
||||
let email = tagInfo.attrs['_default'] || content;
|
||||
|
||||
token = state.push('link_open', 'a', 1);
|
||||
token.attrs = [['href', 'mailto:' + content], ['data-bbcode', 'true']];
|
||||
token.attrs = [['href', 'mailto:' + email], ['data-bbcode', 'true']];
|
||||
|
||||
token = state.push('text', '', 0);
|
||||
token.content = content;
|
|
@ -1,170 +0,0 @@
|
|||
export function register(helper, codeName, args, emitter) {
|
||||
// Optional second param for args
|
||||
if (typeof args === "function") {
|
||||
emitter = args;
|
||||
args = {};
|
||||
}
|
||||
|
||||
helper.replaceBlock({
|
||||
start: new RegExp("\\[" + codeName + "(=[^\\[\\]]+)?\\]([\\s\\S]*)", "igm"),
|
||||
stop: new RegExp("\\[\\/" + codeName + "\\]", "igm"),
|
||||
emitter(blockContents, matches) {
|
||||
|
||||
|
||||
const options = helper.getOptions();
|
||||
while (blockContents.length && (typeof blockContents[0] === "string" || blockContents[0] instanceof String)) {
|
||||
blockContents[0] = String(blockContents[0]).replace(/^\s+/, '');
|
||||
if (!blockContents[0].length) {
|
||||
blockContents.shift();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let contents = [];
|
||||
if (blockContents.length) {
|
||||
const nextContents = blockContents.slice(1);
|
||||
blockContents = this.processBlock(blockContents[0], nextContents);
|
||||
|
||||
nextContents.forEach(nc => {
|
||||
blockContents = blockContents.concat(this.processBlock(nc, []));
|
||||
});
|
||||
|
||||
blockContents.forEach(bc => {
|
||||
if (typeof bc === "string" || bc instanceof String) {
|
||||
var processed = this.processInline(String(bc));
|
||||
if (processed.length) {
|
||||
contents.push(['p'].concat(processed));
|
||||
}
|
||||
} else {
|
||||
contents.push(bc);
|
||||
}
|
||||
});
|
||||
}
|
||||
if (!args.singlePara && contents.length === 1 && contents[0] instanceof Array && contents[0][0] === "para") {
|
||||
contents[0].shift();
|
||||
contents = contents[0];
|
||||
}
|
||||
const result = emitter(contents, matches[1] ? matches[1].replace(/^=|\"/g, '') : null, options);
|
||||
return args.noWrap ? result : ['p', result];
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
export function builders(helper) {
|
||||
function replaceBBCode(tag, emitter, opts) {
|
||||
const start = `[${tag}]`;
|
||||
const stop = `[/${tag}]`;
|
||||
|
||||
opts = opts || {};
|
||||
opts = _.merge(opts, { start, stop, emitter });
|
||||
helper.inlineBetween(opts);
|
||||
|
||||
opts = _.merge(opts, { start: start.toUpperCase(), stop: stop.toUpperCase(), emitter });
|
||||
helper.inlineBetween(opts);
|
||||
}
|
||||
|
||||
return {
|
||||
replaceBBCode,
|
||||
|
||||
register(codeName, args, emitter) {
|
||||
register(helper, codeName, args, emitter);
|
||||
},
|
||||
|
||||
rawBBCode(tag, emitter) {
|
||||
replaceBBCode(tag, emitter, { rawContents: true });
|
||||
},
|
||||
|
||||
removeEmptyLines(contents) {
|
||||
const result = [];
|
||||
for (let i=0; i < contents.length; i++) {
|
||||
if (contents[i] !== "\n") { result.push(contents[i]); }
|
||||
}
|
||||
return result;
|
||||
},
|
||||
|
||||
replaceBBCodeParamsRaw(tag, emitter) {
|
||||
var opts = {
|
||||
rawContents: true,
|
||||
emitter(contents) {
|
||||
const m = /^([^\]]+)\]([\S\s]*)$/.exec(contents);
|
||||
if (m) { return emitter.call(this, m[1], m[2]); }
|
||||
}
|
||||
};
|
||||
|
||||
helper.inlineBetween(_.merge(opts, { start: "[" + tag + "=", stop: "[/" + tag + "]" }));
|
||||
|
||||
tag = tag.toUpperCase();
|
||||
helper.inlineBetween(_.merge(opts, { start: "[" + tag + "=", stop: "[/" + tag + "]" }));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
|
||||
helper.whiteList(['span.bbcode-b', 'span.bbcode-i', 'span.bbcode-u', 'span.bbcode-s']);
|
||||
|
||||
const { replaceBBCode, rawBBCode, removeEmptyLines, replaceBBCodeParamsRaw } = builders(helper);
|
||||
|
||||
replaceBBCode('b', contents => ['span', {'class': 'bbcode-b'}].concat(contents));
|
||||
replaceBBCode('i', contents => ['span', {'class': 'bbcode-i'}].concat(contents));
|
||||
replaceBBCode('u', contents => ['span', {'class': 'bbcode-u'}].concat(contents));
|
||||
replaceBBCode('s', contents => ['span', {'class': 'bbcode-s'}].concat(contents));
|
||||
|
||||
replaceBBCode('ul', contents => ['ul'].concat(removeEmptyLines(contents)));
|
||||
replaceBBCode('ol', contents => ['ol'].concat(removeEmptyLines(contents)));
|
||||
replaceBBCode('li', contents => ['li'].concat(removeEmptyLines(contents)));
|
||||
|
||||
rawBBCode('img', href => ['img', {href}]);
|
||||
rawBBCode('email', contents => ['a', {href: "mailto:" + contents, 'data-bbcode': true}, contents]);
|
||||
|
||||
replaceBBCode('url', contents => {
|
||||
if (!Array.isArray(contents)) { return; }
|
||||
|
||||
const first = contents[0];
|
||||
if (contents.length === 1 && Array.isArray(first) && first[0] === 'a') {
|
||||
// single-line bbcode links shouldn't be oneboxed, so we mark this as a bbcode link.
|
||||
if (typeof first[1] !== 'object') { first.splice(1, 0, {}); }
|
||||
first[1]['data-bbcode'] = true;
|
||||
}
|
||||
return ['concat'].concat(contents);
|
||||
});
|
||||
|
||||
replaceBBCodeParamsRaw('url', function(param, contents) {
|
||||
const url = param.replace(/(^")|("$)/g, '');
|
||||
return ['a', {'href': url}].concat(this.processInline(contents));
|
||||
});
|
||||
|
||||
replaceBBCodeParamsRaw("email", function(param, contents) {
|
||||
return ['a', {href: "mailto:" + param, 'data-bbcode': true}].concat(contents);
|
||||
});
|
||||
|
||||
helper.onParseNode(event => {
|
||||
if (!Array.isArray(event.node)) { return; }
|
||||
const result = [event.node[0]];
|
||||
const nodes = event.node.slice(1);
|
||||
for (let i = 0; i < nodes.length; i++) {
|
||||
if (Array.isArray(nodes[i]) && nodes[i][0] === 'concat') {
|
||||
for (let j = 1; j < nodes[i].length; j++) { result.push(nodes[i][j]); }
|
||||
} else {
|
||||
result.push(nodes[i]);
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < result.length; i++) { event.node[i] = result[i]; }
|
||||
});
|
||||
|
||||
helper.replaceBlock({
|
||||
start: /(\[code\])([\s\S]*)/igm,
|
||||
stop: /\[\/code\]/igm,
|
||||
rawContents: true,
|
||||
|
||||
emitter(blockContents) {
|
||||
const options = helper.getOptions();
|
||||
const inner = blockContents.join("\n");
|
||||
const defaultCodeLang = options.defaultCodeLang;
|
||||
return ['p', ['pre', ['code', {'class': `lang-${defaultCodeLang}`}, inner]]];
|
||||
}
|
||||
});
|
||||
}
|
|
@ -1,73 +0,0 @@
|
|||
import guid from 'pretty-text/guid';
|
||||
|
||||
/**
|
||||
markdown-js doesn't ensure that em/strong codes are present on word boundaries.
|
||||
So we create our own handlers here.
|
||||
**/
|
||||
|
||||
// From PageDown
|
||||
const aLetter = /[a-zA-Z0-9\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0523\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0621-\u064a\u0660-\u0669\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07c0-\u07ea\u07f4-\u07f5\u07fa\u0904-\u0939\u093d\u0950\u0958-\u0961\u0966-\u096f\u0971-\u0972\u097b-\u097f\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09e6-\u09f1\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a66-\u0a6f\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0ae6-\u0aef\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b66-\u0b6f\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0be6-\u0bef\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58-\u0c59\u0c60-\u0c61\u0c66-\u0c6f\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0ce6-\u0cef\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d28\u0d2a-\u0d39\u0d3d\u0d60-\u0d61\u0d66-\u0d6f\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e46\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0ed0-\u0ed9\u0edc-\u0edd\u0f00\u0f20-\u0f29\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f-\u1049\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1090-\u1099\u10a0-\u10c5\u10d0-\u10fa\u10fc\u1100-\u1159\u115f-\u11a2\u11a8-\u11f9\u1200-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u17e0-\u17e9\u1810-\u1819\u1820-\u1877\u1880-\u18a8\u18aa\u1900-\u191c\u1946-\u196d\u1970-\u1974\u1980-\u19a9\u19c1-\u19c7\u19d0-\u19d9\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b50-\u1b59\u1b83-\u1ba0\u1bae-\u1bb9\u1c00-\u1c23\u1c40-\u1c49\u1c4d-\u1c7d\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u2094\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2183-\u2184\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2c6f\u2c71-\u2c7d\u2c80-\u2ce4\u2d00-\u2d25\u2d30-\u2d65\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3006\u3031-\u3035\u303b-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31b7\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fc3\ua000-\ua48c\ua500-\ua60c\ua610-\ua62b\ua640-\ua65f\ua662-\ua66e\ua67f-\ua697\ua717-\ua71f\ua722-\ua788\ua78b-\ua78c\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8d0-\ua8d9\ua900-\ua925\ua930-\ua946\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa50-\uaa59\uac00-\ud7a3\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe33-\ufe34\ufe4d-\ufe4f\ufe70-\ufe74\ufe76-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc]/;
|
||||
|
||||
|
||||
function unhoist(obj,from,to){
|
||||
let unhoisted = 0;
|
||||
const regex = new RegExp(from, "g");
|
||||
|
||||
if(_.isArray(obj)){
|
||||
for (let i=0; i<obj.length; i++){
|
||||
const item = obj[i];
|
||||
|
||||
if (_.isString(item)) {
|
||||
// Odd, but we need +1 for the / in front of /*
|
||||
const matches = item.match(regex);
|
||||
unhoisted -= matches ? matches.length : 0;
|
||||
|
||||
obj[i] = item.replace(regex, to);
|
||||
unhoisted += item.length - obj[i].length;
|
||||
}
|
||||
if (_.isArray(item)) {
|
||||
unhoisted += unhoist(item, from, to);
|
||||
}
|
||||
}
|
||||
}
|
||||
return unhoisted;
|
||||
};
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
|
||||
function replaceMarkdown(match, tag) {
|
||||
const hash = guid();
|
||||
|
||||
helper.registerInline(match, function(text, matched, prev) {
|
||||
if (!text || text.length < match.length + 1) { return; }
|
||||
|
||||
let lastText = prev[prev.length-1];
|
||||
lastText = typeof lastText === "string" && lastText;
|
||||
lastText = lastText && lastText[lastText.length-1];
|
||||
|
||||
if (lastText && (lastText === "/" || lastText.match(aLetter))) { return; }
|
||||
if (text[match.length].match(/\s/)) { return; }
|
||||
|
||||
// hoist out escaped \*
|
||||
text = text.replace(new RegExp("\\\\\\" + match[0], "g"), hash);
|
||||
|
||||
const endText = new RegExp("[^\\s|" + match[0] + "]" + match.replace(/\*/g,"\\*") + "([^" + match[0] + "]|$)");
|
||||
const finish = text.split("\n")[0].search(endText);
|
||||
if (finish && finish >= 0) {
|
||||
const newText = this.processInline(text.substring(match.length, finish+1));
|
||||
const unhoisted_length = unhoist(newText,hash,match[0]);
|
||||
const array = typeof tag === "string" ? [tag].concat(newText) : [tag[0], [tag[1]].concat(newText)];
|
||||
return [(finish + match.length + 1) - unhoisted_length, array];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
replaceMarkdown('***', ['strong','em']);
|
||||
replaceMarkdown('___', ['strong','em']);
|
||||
replaceMarkdown('**', 'strong');
|
||||
replaceMarkdown('__', 'strong');
|
||||
replaceMarkdown('*', 'em');
|
||||
replaceMarkdown('_', 'em');
|
||||
};
|
|
@ -1,20 +1,104 @@
|
|||
function addHashtag(buffer, matches, state) {
|
||||
const options = state.md.options.discourse;
|
||||
const [hashtag, slug] = matches;
|
||||
const categoryHashtagLookup = options.categoryHashtagLookup;
|
||||
const result = categoryHashtagLookup && categoryHashtagLookup(slug);
|
||||
|
||||
let token;
|
||||
|
||||
if (result) {
|
||||
token = new state.Token('link_open', 'a', 1);
|
||||
token.attrs = [['class', 'hashtag'], ['href', result[0]]];
|
||||
token.block = false;
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = '#';
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('span_open', 'span', 1);
|
||||
token.block = false;
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = result[1];
|
||||
buffer.push(token);
|
||||
|
||||
buffer.push(new state.Token('span_close', 'span', -1));
|
||||
|
||||
buffer.push(new state.Token('link_close', 'a', -1));
|
||||
} else {
|
||||
|
||||
token = new state.Token('span_open', 'span', 1);
|
||||
token.attrs = [['class', 'hashtag']];
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = hashtag;
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('span_close', 'span', -1);
|
||||
buffer.push(token);
|
||||
}
|
||||
}
|
||||
|
||||
const REGEX = /#([\w-:]{1,101})/gi;
|
||||
|
||||
function allowedBoundary(content, index, utils) {
|
||||
let code = content.charCodeAt(index);
|
||||
return (utils.isWhiteSpace(code) || utils.isPunctChar(String.fromCharCode(code)));
|
||||
}
|
||||
|
||||
function applyHashtag(content, state) {
|
||||
let result = null,
|
||||
match,
|
||||
pos = 0;
|
||||
|
||||
while (match = REGEX.exec(content)) {
|
||||
// check boundary
|
||||
if (match.index > 0) {
|
||||
if (!allowedBoundary(content, match.index-1, state.md.utils)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// check forward boundary as well
|
||||
if (match.index + match[0].length < content.length) {
|
||||
if (!allowedBoundary(content, match.index + match[0].length, state.md.utils)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (match.index > pos) {
|
||||
result = result || [];
|
||||
let token = new state.Token('text', '', 0);
|
||||
token.content = content.slice(pos, match.index);
|
||||
result.push(token);
|
||||
}
|
||||
|
||||
result = result || [];
|
||||
addHashtag(result, match, state);
|
||||
|
||||
pos = match.index + match[0].length;
|
||||
}
|
||||
|
||||
if (result && pos < content.length) {
|
||||
let token = new state.Token('text', '', 0);
|
||||
token.content = content.slice(pos);
|
||||
result.push(token);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.inlineRegexp({
|
||||
start: '#',
|
||||
matcher: /^#([\w-:]{1,101})/i,
|
||||
spaceOrTagBoundary: true,
|
||||
helper.registerPlugin(md=>{
|
||||
|
||||
emitter(matches) {
|
||||
const options = helper.getOptions();
|
||||
const [hashtag, slug] = matches;
|
||||
const categoryHashtagLookup = options.categoryHashtagLookup;
|
||||
const result = categoryHashtagLookup && categoryHashtagLookup(slug);
|
||||
|
||||
return result ? ['a', { class: 'hashtag', href: result[0] }, '#', ["span", {}, result[1]]]
|
||||
: ['span', { class: 'hashtag' }, hashtag];
|
||||
}
|
||||
md.core.ruler.push('category-hashtag', state => md.options.discourse.helpers.textReplace(
|
||||
state, applyHashtag, true /* skip all links */
|
||||
));
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,18 +1,44 @@
|
|||
import { censor } from 'pretty-text/censored-words';
|
||||
import { registerOption } from 'pretty-text/pretty-text';
|
||||
import { censorFn } from 'pretty-text/censored-words';
|
||||
|
||||
registerOption((siteSettings, opts) => {
|
||||
opts.features.censored = true;
|
||||
opts.censoredWords = siteSettings.censored_words;
|
||||
opts.censoredPattern = siteSettings.censored_pattern;
|
||||
});
|
||||
function recurse(tokens, apply) {
|
||||
let i;
|
||||
for(i=0;i<tokens.length;i++) {
|
||||
apply(tokens[i]);
|
||||
if (tokens[i].children) {
|
||||
recurse(tokens[i].children, apply);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function censorTree(state, censor) {
|
||||
if (!state.tokens) {
|
||||
return;
|
||||
}
|
||||
|
||||
recurse(state.tokens, token => {
|
||||
if (token.content) {
|
||||
token.content = censor(token.content);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.addPreProcessor(text => {
|
||||
const options = helper.getOptions();
|
||||
return censor(text, options.censoredWords, options.censoredPattern);
|
||||
helper.registerOptions((opts, siteSettings) => {
|
||||
opts.censoredWords = siteSettings.censored_words;
|
||||
opts.censoredPattern = siteSettings.censored_pattern;
|
||||
});
|
||||
|
||||
helper.registerPlugin(md => {
|
||||
const words = md.options.discourse.censoredWords;
|
||||
const patterns = md.options.discourse.censoredPattern;
|
||||
|
||||
if ((words && words.length > 0) || (patterns && patterns.length > 0)) {
|
||||
const replacement = String.fromCharCode(9632);
|
||||
const censor = censorFn(words, patterns, replacement);
|
||||
md.core.ruler.push('censored', state => censorTree(state, censor));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,27 +1,38 @@
|
|||
import { escape } from 'pretty-text/sanitizer';
|
||||
import { registerOption } from 'pretty-text/pretty-text';
|
||||
// we need a custom renderer for code blocks cause we have a slightly non compliant
|
||||
// format with special handling for text and so on
|
||||
|
||||
// Support for various code blocks
|
||||
const TEXT_CODE_CLASSES = ["text", "pre", "plain"];
|
||||
|
||||
function codeFlattenBlocks(blocks) {
|
||||
let result = "";
|
||||
blocks.forEach(function(b) {
|
||||
result += b;
|
||||
if (b.trailing) { result += b.trailing; }
|
||||
});
|
||||
return result;
|
||||
|
||||
function render(tokens, idx, options, env, slf, md) {
|
||||
let token = tokens[idx],
|
||||
info = token.info ? md.utils.unescapeAll(token.info) : '',
|
||||
langName = md.options.discourse.defaultCodeLang,
|
||||
className,
|
||||
escapedContent = md.utils.escapeHtml(token.content);
|
||||
|
||||
if (info) {
|
||||
// strip off any additional languages
|
||||
info = info.split(/\s+/g)[0];
|
||||
}
|
||||
|
||||
const acceptableCodeClasses = md.options.discourse.acceptableCodeClasses;
|
||||
if (acceptableCodeClasses && info && acceptableCodeClasses.indexOf(info) !== -1) {
|
||||
langName = info;
|
||||
}
|
||||
|
||||
className = TEXT_CODE_CLASSES.indexOf(info) !== -1 ? 'lang-nohighlight' : 'lang-' + langName;
|
||||
|
||||
return `<pre><code class='${className}'>${escapedContent}</code></pre>\n`;
|
||||
}
|
||||
|
||||
registerOption((siteSettings, opts) => {
|
||||
opts.features.code = true;
|
||||
opts.defaultCodeLang = siteSettings.default_code_lang;
|
||||
opts.acceptableCodeClasses = (siteSettings.highlighted_languages || "").split("|").concat(['auto', 'nohighlight']);
|
||||
});
|
||||
|
||||
export function setup(helper) {
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
helper.registerOptions((opts, siteSettings) => {
|
||||
opts.defaultCodeLang = siteSettings.default_code_lang;
|
||||
opts.acceptableCodeClasses = (siteSettings.highlighted_languages || "").split("|").concat(['auto', 'nohighlight']);
|
||||
});
|
||||
|
||||
helper.whiteList({
|
||||
custom(tag, name, value) {
|
||||
|
@ -34,50 +45,7 @@ export function setup(helper) {
|
|||
}
|
||||
});
|
||||
|
||||
helper.replaceBlock({
|
||||
start: /^`{3}([^\n\[\]]+)?\n?([\s\S]*)?/gm,
|
||||
stop: /^```$/gm,
|
||||
withoutLeading: /\[quote/gm, //if leading text contains a quote this should not match
|
||||
emitter(blockContents, matches) {
|
||||
const opts = helper.getOptions();
|
||||
|
||||
let codeLang = opts.defaultCodeLang;
|
||||
const acceptableCodeClasses = opts.acceptableCodeClasses;
|
||||
if (acceptableCodeClasses && matches[1] && acceptableCodeClasses.indexOf(matches[1]) !== -1) {
|
||||
codeLang = matches[1];
|
||||
}
|
||||
|
||||
if (TEXT_CODE_CLASSES.indexOf(matches[1]) !== -1) {
|
||||
return ['p', ['pre', ['code', {'class': 'lang-nohighlight'}, codeFlattenBlocks(blockContents) ]]];
|
||||
} else {
|
||||
return ['p', ['pre', ['code', {'class': 'lang-' + codeLang}, codeFlattenBlocks(blockContents) ]]];
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
helper.replaceBlock({
|
||||
start: /(<pre[^\>]*\>)([\s\S]*)/igm,
|
||||
stop: /<\/pre>/igm,
|
||||
rawContents: true,
|
||||
skipIfTradtionalLinebreaks: true,
|
||||
|
||||
emitter(blockContents) {
|
||||
return ['p', ['pre', codeFlattenBlocks(blockContents)]];
|
||||
}
|
||||
});
|
||||
|
||||
// Ensure that content in a code block is fully escaped. This way it's not white listed
|
||||
// and we can use HTML and Javascript examples.
|
||||
helper.onParseNode(function(event) {
|
||||
const node = event.node,
|
||||
path = event.path;
|
||||
|
||||
if (node[0] === 'code') {
|
||||
const regexp = (path && path[path.length-1] && path[path.length-1][0] && path[path.length-1][0] === "pre") ?
|
||||
/ +$/g : /^ +| +$/g;
|
||||
|
||||
const contents = node[node.length-1];
|
||||
node[node.length-1] = escape(contents.replace(regexp,''));
|
||||
}
|
||||
helper.registerPlugin(md=>{
|
||||
md.renderer.rules.fence = (tokens,idx,options,env,slf)=>render(tokens,idx,options,env,slf,md);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,117 +1,246 @@
|
|||
import { registerOption } from 'pretty-text/pretty-text';
|
||||
import { buildEmojiUrl, isCustomEmoji } from 'pretty-text/emoji';
|
||||
import { translations } from 'pretty-text/emoji/data';
|
||||
|
||||
let _unicodeReplacements;
|
||||
let _unicodeRegexp;
|
||||
export function setUnicodeReplacements(replacements) {
|
||||
_unicodeReplacements = replacements;
|
||||
if (replacements) {
|
||||
// We sort and reverse to match longer emoji sequences first
|
||||
_unicodeRegexp = new RegExp(Object.keys(replacements).sort().reverse().join("|"), "g");
|
||||
}
|
||||
};
|
||||
const MAX_NAME_LENGTH = 60;
|
||||
|
||||
function escapeRegExp(s) {
|
||||
return s.replace(/[-/\\^$*+?.()|[\]{}]/gi, '\\$&');
|
||||
let translationTree = null;
|
||||
|
||||
// This allows us to efficiently search for aliases
|
||||
// We build a data structure that allows us to quickly
|
||||
// search through our N next chars to see if any match
|
||||
// one of our alias emojis.
|
||||
//
|
||||
function buildTranslationTree() {
|
||||
let tree = [];
|
||||
let lastNode;
|
||||
|
||||
Object.keys(translations).forEach(function(key){
|
||||
let i;
|
||||
let node = tree;
|
||||
|
||||
for(i=0;i<key.length;i++) {
|
||||
let code = key.charCodeAt(i);
|
||||
let j;
|
||||
|
||||
let found = false;
|
||||
|
||||
for (j=0;j<node.length;j++){
|
||||
if (node[j][0] === code) {
|
||||
node = node[j][1];
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
// token, children, value
|
||||
let tmp = [code, []];
|
||||
node.push(tmp);
|
||||
lastNode = tmp;
|
||||
node = tmp[1];
|
||||
}
|
||||
}
|
||||
|
||||
lastNode[1] = translations[key];
|
||||
});
|
||||
|
||||
return tree;
|
||||
}
|
||||
|
||||
function checkPrev(prev) {
|
||||
if (prev && prev.length) {
|
||||
const lastToken = prev[prev.length-1];
|
||||
if (lastToken && lastToken.charAt) {
|
||||
const lastChar = lastToken.charAt(lastToken.length-1);
|
||||
if (!/\W/.test(lastChar)) return false;
|
||||
|
||||
function imageFor(code, opts) {
|
||||
code = code.toLowerCase();
|
||||
const url = buildEmojiUrl(code, opts);
|
||||
if (url) {
|
||||
const title = `:${code}:`;
|
||||
const classes = isCustomEmoji(code, opts) ? "emoji emoji-custom" : "emoji";
|
||||
return {url, title, classes};
|
||||
}
|
||||
}
|
||||
|
||||
function getEmojiName(content, pos, state) {
|
||||
|
||||
if (content.charCodeAt(pos) !== 58) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (pos > 0) {
|
||||
let prev = content.charCodeAt(pos-1);
|
||||
if (!state.md.utils.isSpace(prev) && !state.md.utils.isPunctChar(String.fromCharCode(prev))) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
|
||||
pos++;
|
||||
if (content.charCodeAt(pos) === 58) {
|
||||
return;
|
||||
}
|
||||
|
||||
let length = 0;
|
||||
while(length < MAX_NAME_LENGTH) {
|
||||
length++;
|
||||
|
||||
if (content.charCodeAt(pos+length) === 58) {
|
||||
// check for t2-t6
|
||||
if (content.substr(pos+length+1, 3).match(/t[2-6]:/)) {
|
||||
length += 3;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (pos+length > content.length) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (length === MAX_NAME_LENGTH) {
|
||||
return;
|
||||
}
|
||||
|
||||
return content.substr(pos, length);
|
||||
}
|
||||
|
||||
registerOption((siteSettings, opts, state) => {
|
||||
opts.features.emoji = !!siteSettings.enable_emoji;
|
||||
opts.emojiSet = siteSettings.emoji_set || "";
|
||||
opts.customEmoji = state.customEmoji;
|
||||
});
|
||||
// straight forward :smile: to emoji image
|
||||
function getEmojiTokenByName(name, state) {
|
||||
|
||||
let info;
|
||||
if (info = imageFor(name, state.md.options.discourse)) {
|
||||
let token = new state.Token('emoji', 'img', 0);
|
||||
token.attrs = [['src', info.url],
|
||||
['title', info.title],
|
||||
['class', info.classes],
|
||||
['alt', info.title]];
|
||||
|
||||
return token;
|
||||
}
|
||||
}
|
||||
|
||||
function getEmojiTokenByTranslation(content, pos, state) {
|
||||
|
||||
translationTree = translationTree || buildTranslationTree();
|
||||
|
||||
let currentTree = translationTree;
|
||||
|
||||
let i;
|
||||
let search = true;
|
||||
let found = false;
|
||||
let start = pos;
|
||||
|
||||
while(search) {
|
||||
|
||||
search = false;
|
||||
let code = content.charCodeAt(pos);
|
||||
|
||||
for (i=0;i<currentTree.length;i++) {
|
||||
if(currentTree[i][0] === code) {
|
||||
currentTree = currentTree[i][1];
|
||||
pos++;
|
||||
search = true;
|
||||
if (typeof currentTree === "string") {
|
||||
found = currentTree;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
return;
|
||||
}
|
||||
|
||||
// quick boundary check
|
||||
if (start > 0) {
|
||||
let leading = content.charAt(start-1);
|
||||
if (!state.md.utils.isSpace(leading.charCodeAt(0)) && !state.md.utils.isPunctChar(leading)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// check trailing for punct or space
|
||||
if (pos < content.length) {
|
||||
let trailing = content.charCodeAt(pos);
|
||||
if (!state.md.utils.isSpace(trailing)){
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let token = getEmojiTokenByName(found, state);
|
||||
if (token) {
|
||||
return { pos, token };
|
||||
}
|
||||
}
|
||||
|
||||
function applyEmoji(content, state, emojiUnicodeReplacer) {
|
||||
let i;
|
||||
let result = null;
|
||||
let contentToken = null;
|
||||
|
||||
let start = 0;
|
||||
|
||||
if (emojiUnicodeReplacer) {
|
||||
content = emojiUnicodeReplacer(content);
|
||||
}
|
||||
|
||||
let endToken = content.length;
|
||||
|
||||
for (i=0; i<content.length-1; i++) {
|
||||
let offset = 0;
|
||||
let emojiName = getEmojiName(content,i,state);
|
||||
let token = null;
|
||||
|
||||
if (emojiName) {
|
||||
token = getEmojiTokenByName(emojiName, state);
|
||||
if (token) {
|
||||
offset = emojiName.length+2;
|
||||
}
|
||||
}
|
||||
|
||||
if (!token) {
|
||||
// handle aliases (note: we can't do this in inline cause ; is not a split point)
|
||||
//
|
||||
let info = getEmojiTokenByTranslation(content, i, state);
|
||||
|
||||
if (info) {
|
||||
offset = info.pos - i;
|
||||
token = info.token;
|
||||
}
|
||||
}
|
||||
|
||||
if (token) {
|
||||
result = result || [];
|
||||
if (i-start>0) {
|
||||
contentToken = new state.Token('text', '', 0);
|
||||
contentToken.content = content.slice(start,i);
|
||||
result.push(contentToken);
|
||||
}
|
||||
|
||||
result.push(token);
|
||||
endToken = start = i + offset;
|
||||
}
|
||||
}
|
||||
|
||||
if (endToken < content.length) {
|
||||
contentToken = new state.Token('text', '', 0);
|
||||
contentToken.content = content.slice(endToken);
|
||||
result.push(contentToken);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.whiteList('img.emoji');
|
||||
|
||||
function imageFor(code) {
|
||||
code = code.toLowerCase();
|
||||
const opts = helper.getOptions();
|
||||
const url = buildEmojiUrl(code, opts);
|
||||
if (url) {
|
||||
const title = `:${code}:`;
|
||||
const classes = isCustomEmoji(code, opts) ? "emoji emoji-custom" : "emoji";
|
||||
return ['img', { href: url, title, 'class': classes, alt: title }];
|
||||
}
|
||||
}
|
||||
|
||||
const translationsWithColon = {};
|
||||
Object.keys(translations).forEach(t => {
|
||||
if (t[0] === ':') {
|
||||
translationsWithColon[t] = translations[t];
|
||||
} else {
|
||||
const replacement = translations[t];
|
||||
helper.inlineReplace(t, (token, match, prev) => {
|
||||
return checkPrev(prev) ? imageFor(replacement) : token;
|
||||
});
|
||||
}
|
||||
});
|
||||
const translationColonRegexp = new RegExp(Object.keys(translationsWithColon).map(t => `(${escapeRegExp(t)})`).join("|"));
|
||||
|
||||
helper.registerInline(':', (text, match, prev) => {
|
||||
const endPos = text.indexOf(':', 1);
|
||||
const firstSpace = text.search(/\s/);
|
||||
if (!checkPrev(prev)) { return; }
|
||||
|
||||
// If there is no trailing colon, check our translations that begin with colons
|
||||
if (endPos === -1 || (firstSpace !== -1 && endPos > firstSpace)) {
|
||||
translationColonRegexp.lastIndex = 0;
|
||||
const m = translationColonRegexp.exec(text);
|
||||
if (m && m[0] && text.indexOf(m[0]) === 0) {
|
||||
// Check outer edge
|
||||
const lastChar = text.charAt(m[0].length);
|
||||
if (lastChar && !/\s/.test(lastChar)) return;
|
||||
const contents = imageFor(translationsWithColon[m[0]]);
|
||||
if (contents) {
|
||||
return [m[0].length, contents];
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let between;
|
||||
const emojiNameMatch = text.match(/(?:.*?)(:(?!:).?[\w-]*(?::t\d)?:)/);
|
||||
if (emojiNameMatch) {
|
||||
between = emojiNameMatch[0].slice(1, -1);
|
||||
} else {
|
||||
between = text.slice(1, -1);
|
||||
}
|
||||
|
||||
const contents = imageFor(between);
|
||||
if (contents) {
|
||||
return [text.indexOf(between, 1) + between.length + 1, contents];
|
||||
}
|
||||
helper.registerOptions((opts, siteSettings, state)=>{
|
||||
opts.features.emoji = !!siteSettings.enable_emoji;
|
||||
opts.emojiSet = siteSettings.emoji_set || "";
|
||||
opts.customEmoji = state.customEmoji;
|
||||
});
|
||||
|
||||
helper.addPreProcessor(text => {
|
||||
if (_unicodeReplacements) {
|
||||
_unicodeRegexp.lastIndex = 0;
|
||||
|
||||
let m;
|
||||
while ((m = _unicodeRegexp.exec(text)) !== null) {
|
||||
let replacement = ":" + _unicodeReplacements[m[0]] + ":";
|
||||
const before = text.charAt(m.index-1);
|
||||
if (!/\B/.test(before)) {
|
||||
replacement = "\u200b" + replacement;
|
||||
}
|
||||
text = text.replace(m[0], replacement);
|
||||
}
|
||||
}
|
||||
return text;
|
||||
helper.registerPlugin((md)=>{
|
||||
md.core.ruler.push('emoji', state => md.options.discourse.helpers.textReplace(
|
||||
state, (c,s)=>applyEmoji(c,s,md.options.discourse.emojiUnicodeReplacer))
|
||||
);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,52 +0,0 @@
|
|||
const BLOCK_TAGS = ['address', 'article', 'aside', 'audio', 'blockquote', 'canvas', 'dd', 'details',
|
||||
'div', 'dl', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
|
||||
'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'iframe', 'noscript', 'ol', 'output',
|
||||
'p', 'pre', 'section', 'table', 'tfoot', 'ul', 'video', 'summary'];
|
||||
|
||||
function splitAtLast(tag, block, next, first) {
|
||||
const endTag = `</${tag}>`;
|
||||
let endTagIndex = first ? block.indexOf(endTag) : block.lastIndexOf(endTag);
|
||||
|
||||
if (endTagIndex !== -1) {
|
||||
endTagIndex += endTag.length;
|
||||
|
||||
const trailing = block.substr(endTagIndex).replace(/^\s+/, '');
|
||||
if (trailing.length) {
|
||||
next.unshift(trailing);
|
||||
}
|
||||
|
||||
return [ block.substr(0, endTagIndex) ];
|
||||
}
|
||||
};
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
|
||||
// If a row begins with HTML tags, don't parse it.
|
||||
helper.registerBlock('html', function(block, next) {
|
||||
let split, pos;
|
||||
|
||||
// Fix manual blockquote paragraphing even though it's not strictly correct
|
||||
// PERF NOTE: /\S+<blockquote/ is a perf hog for search, try on huge string
|
||||
if (pos = block.search(/<blockquote/) >= 0) {
|
||||
if(block.substring(0, pos).search(/\s/) === -1) {
|
||||
split = splitAtLast('blockquote', block, next, true);
|
||||
if (split) { return this.processInline(split[0]); }
|
||||
}
|
||||
}
|
||||
|
||||
const m = /^\s*<\/?([^>]+)\>/.exec(block);
|
||||
if (m && m[1]) {
|
||||
const tag = m[1].split(/\s/);
|
||||
if (tag && tag[0] && BLOCK_TAGS.indexOf(tag[0]) !== -1) {
|
||||
split = splitAtLast(tag[0], block, next);
|
||||
if (split) {
|
||||
if (split.length === 1 && split[0] === block) { return; }
|
||||
return split;
|
||||
}
|
||||
return [ block.toString() ];
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
|
@ -1,51 +1,88 @@
|
|||
/**
|
||||
Supports our custom @mention syntax for calling out a user in a post.
|
||||
It will add a special class to them, and create a link if the user is found in a
|
||||
local map.
|
||||
**/
|
||||
const regex = /^(\w[\w.-]{0,59})\b/i;
|
||||
|
||||
function applyMentions(state, silent, isWhiteSpace, isPunctChar, mentionLookup, getURL) {
|
||||
|
||||
let pos = state.pos;
|
||||
|
||||
// 64 = @
|
||||
if (silent || state.src.charCodeAt(pos) !== 64) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pos > 0) {
|
||||
let prev = state.src.charCodeAt(pos-1);
|
||||
if (!isWhiteSpace(prev) && !isPunctChar(String.fromCharCode(prev))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// skip if in a link
|
||||
if (state.tokens) {
|
||||
let last = state.tokens[state.tokens.length-1];
|
||||
if (last) {
|
||||
if (last.type === 'link_open') {
|
||||
return false;
|
||||
}
|
||||
if (last.type === 'html_inline' && last.content.substr(0,2) === "<a") {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let maxMention = state.src.substr(pos+1, 60);
|
||||
|
||||
let matches = maxMention.match(regex);
|
||||
|
||||
if (!matches) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let username = matches[1];
|
||||
|
||||
let type = mentionLookup && mentionLookup(username);
|
||||
|
||||
let tag = 'a';
|
||||
let className = 'mention';
|
||||
let href = null;
|
||||
|
||||
if (type === 'user') {
|
||||
href = getURL('/u/') + username.toLowerCase();
|
||||
} else if (type === 'group') {
|
||||
href = getURL('/groups/') + username;
|
||||
className = 'mention-group';
|
||||
} else {
|
||||
tag = 'span';
|
||||
}
|
||||
|
||||
let token = state.push('mention_open', tag, 1);
|
||||
token.attrs = [['class', className]];
|
||||
if (href) {
|
||||
token.attrs.push(['href', href]);
|
||||
}
|
||||
|
||||
token = state.push('text', '', 0);
|
||||
token.content = '@'+username;
|
||||
|
||||
state.push('mention_close', tag, -1);
|
||||
|
||||
state.pos = pos + username.length + 1;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
// We have to prune @mentions that are within links.
|
||||
helper.onParseNode(event => {
|
||||
const node = event.node,
|
||||
path = event.path;
|
||||
|
||||
if (node[1] && node[1]["class"] === 'mention') {
|
||||
const parent = path[path.length - 1];
|
||||
|
||||
// If the parent is an 'a', remove it
|
||||
if (parent && parent[0] === 'a') {
|
||||
const name = node[2];
|
||||
node.length = 0;
|
||||
node[0] = "__RAW";
|
||||
node[1] = name;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
helper.inlineRegexp({
|
||||
start: '@',
|
||||
// NOTE: since we can't use SiteSettings here (they loads later in process)
|
||||
// we are being less strict to account for more cases than allowed
|
||||
matcher: /^@(\w[\w.-]{0,59})\b/i,
|
||||
wordBoundary: true,
|
||||
|
||||
emitter(matches) {
|
||||
const mention = matches[0].trim();
|
||||
const name = matches[1];
|
||||
const opts = helper.getOptions();
|
||||
const mentionLookup = opts.mentionLookup;
|
||||
|
||||
const type = mentionLookup && mentionLookup(name);
|
||||
if (type === "user") {
|
||||
return ['a', {'class': 'mention', href: opts.getURL("/u/") + name.toLowerCase()}, mention];
|
||||
} else if (type === "group") {
|
||||
return ['a', {'class': 'mention-group', href: opts.getURL("/groups/") + name}, mention];
|
||||
} else {
|
||||
return ['span', {'class': 'mention'}, mention];
|
||||
}
|
||||
}
|
||||
helper.registerPlugin(md => {
|
||||
md.inline.ruler.push('mentions', (state,silent)=> applyMentions(
|
||||
state,
|
||||
silent,
|
||||
md.utils.isWhiteSpace,
|
||||
md.utils.isPunctChar,
|
||||
md.options.discourse.mentionLookup,
|
||||
md.options.discourse.getURL
|
||||
));
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -1,30 +1,53 @@
|
|||
// Support for the newline behavior in markdown that most expect. Look through all text nodes
|
||||
// in the tree, replace any new lines with `br`s.
|
||||
// see: https://github.com/markdown-it/markdown-it/issues/375
|
||||
//
|
||||
// we use a custom paragraph rule cause we have to signal when a
|
||||
// link starts with a space, so we can bypass a onebox
|
||||
// this is a freedom patch, so careful, may break on updates
|
||||
|
||||
|
||||
function newline(state, silent) {
|
||||
var token, pmax, max, pos = state.pos;
|
||||
|
||||
if (state.src.charCodeAt(pos) !== 0x0A/* \n */) { return false; }
|
||||
|
||||
pmax = state.pending.length - 1;
|
||||
max = state.posMax;
|
||||
|
||||
// ' \n' -> hardbreak
|
||||
// Lookup in pending chars is bad practice! Don't copy to other rules!
|
||||
// Pending string is stored in concat mode, indexed lookups will cause
|
||||
// convertion to flat mode.
|
||||
if (!silent) {
|
||||
if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) {
|
||||
if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) {
|
||||
state.pending = state.pending.replace(/ +$/, '');
|
||||
token = state.push('hardbreak', 'br', 0);
|
||||
} else {
|
||||
state.pending = state.pending.slice(0, -1);
|
||||
token = state.push('softbreak', 'br', 0);
|
||||
}
|
||||
|
||||
} else {
|
||||
token = state.push('softbreak', 'br', 0);
|
||||
}
|
||||
}
|
||||
|
||||
pos++;
|
||||
|
||||
// skip heading spaces for next line
|
||||
while (pos < max && state.md.utils.isSpace(state.src.charCodeAt(pos))) {
|
||||
if (token) {
|
||||
token.leading_space = true;
|
||||
}
|
||||
pos++;
|
||||
}
|
||||
|
||||
state.pos = pos;
|
||||
return true;
|
||||
};
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
|
||||
helper.postProcessText((text, event) => {
|
||||
const { options, insideCounts } = event;
|
||||
if (options.traditionalMarkdownLinebreaks || (insideCounts.pre > 0)) { return; }
|
||||
|
||||
if (text === "\n") {
|
||||
// If the tag is just a new line, replace it with a `<br>`
|
||||
return [['br']];
|
||||
} else {
|
||||
// If the text node contains new lines, perhaps with text between them, insert the
|
||||
// `<br>` tags.
|
||||
const split = text.split(/\n+/);
|
||||
if (split.length) {
|
||||
const replacement = [];
|
||||
for (var i=0; i<split.length; i++) {
|
||||
if (split[i].length > 0) { replacement.push(split[i]); }
|
||||
if (i !== split.length-1) { replacement.push(['br']); }
|
||||
}
|
||||
|
||||
return replacement;
|
||||
}
|
||||
}
|
||||
helper.registerPlugin(md => {
|
||||
md.inline.ruler.at('newline', newline);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,71 +1,95 @@
|
|||
import { lookupCache } from 'pretty-text/oneboxer';
|
||||
|
||||
// Given a node in the document and its parent, determine whether it is on its own line or not.
|
||||
function isOnOneLine(link, parent) {
|
||||
if (!parent) { return false; }
|
||||
|
||||
const siblings = parent.slice(1);
|
||||
if ((!siblings) || (siblings.length < 1)) { return false; }
|
||||
|
||||
const idx = siblings.indexOf(link);
|
||||
if (idx === -1) { return false; }
|
||||
|
||||
if (idx > 0) {
|
||||
const prev = siblings[idx-1];
|
||||
if (prev[0] !== 'br') { return false; }
|
||||
function applyOnebox(state, silent) {
|
||||
if (silent || !state.tokens || state.tokens.length < 3) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (idx < siblings.length) {
|
||||
const next = siblings[idx+1];
|
||||
if (next && (!((next[0] === 'br') || (typeof next === 'string' && next.trim() === "")))) { return false; }
|
||||
}
|
||||
let i;
|
||||
for(i=1;i<state.tokens.length;i++) {
|
||||
let token = state.tokens[i];
|
||||
|
||||
return true;
|
||||
}
|
||||
let prev = state.tokens[i-1];
|
||||
let prevAccepted = prev.type === "paragraph_open" && prev.level === 0;
|
||||
|
||||
// We only onebox stuff that is on its own line.
|
||||
export function setup(helper) {
|
||||
if (token.type === "inline" && prevAccepted) {
|
||||
let j;
|
||||
for(j=0;j<token.children.length;j++){
|
||||
let child = token.children[j];
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
if (child.type === "link_open" && child.markup === 'linkify' && child.info === 'auto') {
|
||||
|
||||
helper.onParseNode(event => {
|
||||
const node = event.node,
|
||||
path = event.path;
|
||||
if (j === 0 && token.leading_space) {
|
||||
continue;
|
||||
} else if (j > 0) {
|
||||
|
||||
// We only care about links
|
||||
if (node[0] !== 'a') { return; }
|
||||
let prevSibling = token.children[j-1];
|
||||
|
||||
const parent = path[path.length - 1];
|
||||
if (prevSibling.tag !== 'br' || prevSibling.leading_space) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// We don't onebox bbcode
|
||||
if (node[1]['data-bbcode']) {
|
||||
delete node[1]['data-bbcode'];
|
||||
return;
|
||||
}
|
||||
// look ahead for soft or hard break
|
||||
let text = token.children[j+1];
|
||||
let close = token.children[j+2];
|
||||
let lookahead = token.children[j+3];
|
||||
|
||||
// We don't onebox mentions
|
||||
if (node[1]['class'] === 'mention') { return; }
|
||||
if (lookahead && lookahead.tag !== 'br') {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't onebox links within a list
|
||||
for (var i=0; i<path.length; i++) {
|
||||
if (path[i][0] === 'li') { return; }
|
||||
}
|
||||
// check attrs only include a href
|
||||
let attrs = child["attrs"];
|
||||
|
||||
// If the link has a different label text than the link itself, don't onebox it.
|
||||
const label = node[node.length-1];
|
||||
if (label !== node[1]['href']) { return; }
|
||||
if (!attrs || attrs.length !== 1 || attrs[0][0] !== "href") {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (isOnOneLine(node, parent)) {
|
||||
// edge case ... what if this is not http or protocoless?
|
||||
if (!/^http|^\/\//i.test(attrs[0][1])) {
|
||||
continue;
|
||||
}
|
||||
|
||||
node[1]['class'] = 'onebox';
|
||||
node[1].target = '_blank';
|
||||
// we already know text matches cause it is an auto link
|
||||
if (!close || close.type !== "link_close") {
|
||||
continue;
|
||||
}
|
||||
|
||||
const contents = lookupCache(node[1].href);
|
||||
if (contents) {
|
||||
node[0] = '__RAW';
|
||||
node[1] = contents;
|
||||
node.length = 2;
|
||||
// we already determined earlier that 0 0 was href
|
||||
let cached = lookupCache(attrs[0][1]);
|
||||
|
||||
if (cached) {
|
||||
// replace link with 2 blank text nodes and inline html for onebox
|
||||
child.type = 'html_raw';
|
||||
child.content = cached;
|
||||
child.inline = true;
|
||||
|
||||
text.type = 'html_raw';
|
||||
text.content = '';
|
||||
text.inline = true;
|
||||
|
||||
close.type = 'html_raw';
|
||||
close.content = '';
|
||||
close.inline = true;
|
||||
|
||||
} else {
|
||||
// decorate...
|
||||
attrs.push(["class", "onebox"]);
|
||||
attrs.push(["target", "_blank"]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.registerPlugin(md => {
|
||||
md.core.ruler.after('linkify', 'onebox', applyOnebox);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,83 +0,0 @@
|
|||
import { register } from 'pretty-text/engines/discourse-markdown/bbcode';
|
||||
import { registerOption } from 'pretty-text/pretty-text';
|
||||
import { performEmojiUnescape } from 'pretty-text/emoji';
|
||||
|
||||
registerOption((siteSettings, opts) => {
|
||||
opts.enableEmoji = siteSettings.enable_emoji;
|
||||
opts.emojiSet = siteSettings.emoji_set;
|
||||
});
|
||||
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
|
||||
register(helper, 'quote', {noWrap: true, singlePara: true}, (contents, bbParams, options) => {
|
||||
|
||||
const params = {'class': 'quote'};
|
||||
let username = null;
|
||||
const opts = helper.getOptions();
|
||||
|
||||
if (bbParams) {
|
||||
const paramsSplit = bbParams.split(/\,\s*/);
|
||||
username = paramsSplit[0];
|
||||
|
||||
paramsSplit.forEach(function(p,i) {
|
||||
if (i > 0) {
|
||||
var assignment = p.split(':');
|
||||
if (assignment[0] && assignment[1]) {
|
||||
const escaped = helper.escape(assignment[0]);
|
||||
// don't escape attributes, makes no sense
|
||||
if (escaped === assignment[0]) {
|
||||
params['data-' + assignment[0]] = helper.escape(assignment[1].trim());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let avatarImg;
|
||||
const postNumber = parseInt(params['data-post'], 10);
|
||||
const topicId = parseInt(params['data-topic'], 10);
|
||||
|
||||
if (options.lookupAvatarByPostNumber) {
|
||||
// client-side, we can retrieve the avatar from the post
|
||||
avatarImg = options.lookupAvatarByPostNumber(postNumber, topicId);
|
||||
} else if (options.lookupAvatar) {
|
||||
// server-side, we need to lookup the avatar from the username
|
||||
avatarImg = options.lookupAvatar(username);
|
||||
}
|
||||
|
||||
// If there's no username just return a simple quote
|
||||
if (!username) {
|
||||
return ['p', ['aside', params, ['blockquote'].concat(contents)]];
|
||||
}
|
||||
|
||||
const header = ['div', {'class': 'title'},
|
||||
['div', {'class': 'quote-controls'}],
|
||||
avatarImg ? ['__RAW', avatarImg] : "",
|
||||
username ? `${username}:` : "" ];
|
||||
|
||||
if (options.topicId && postNumber && options.getTopicInfo && topicId !== options.topicId) {
|
||||
const topicInfo = options.getTopicInfo(topicId);
|
||||
if (topicInfo) {
|
||||
var href = topicInfo.href;
|
||||
if (postNumber > 0) { href += "/" + postNumber; }
|
||||
// get rid of username said stuff
|
||||
header.pop();
|
||||
|
||||
let title = topicInfo.title;
|
||||
|
||||
if (opts.enableEmoji) {
|
||||
title = performEmojiUnescape(topicInfo.title, {
|
||||
getURL: opts.getURL, emojiSet: opts.emojiSet
|
||||
});
|
||||
}
|
||||
|
||||
header.push(['a', {'href': href}, title]);
|
||||
}
|
||||
}
|
||||
|
||||
return ['aside', params, header, ['blockquote'].concat(contents)];
|
||||
});
|
||||
}
|
|
@ -26,7 +26,7 @@ const rule = {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (split[i].indexOf(/full:\s*true/) === 0) {
|
||||
if (/full:\s*true/.test(split[i])) {
|
||||
full = true;
|
||||
continue;
|
||||
}
|
|
@ -1,35 +1,31 @@
|
|||
import { registerOption } from 'pretty-text/pretty-text';
|
||||
|
||||
function tableFlattenBlocks(blocks) {
|
||||
let result = "";
|
||||
|
||||
blocks.forEach(b => {
|
||||
result += b;
|
||||
if (b.trailing) { result += b.trailing; }
|
||||
});
|
||||
|
||||
// bypass newline insertion
|
||||
return result.replace(/[\n\r]/g, " ");
|
||||
};
|
||||
|
||||
registerOption((siteSettings, opts) => {
|
||||
opts.features.table = !!siteSettings.allow_html_tables;
|
||||
});
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (helper.markdownIt) { return; }
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.whiteList(['table', 'table.md-table', 'tbody', 'thead', 'tr', 'th', 'td']);
|
||||
// this is built in now
|
||||
// TODO: sanitizer needs fixing, does not properly support this yet
|
||||
|
||||
helper.replaceBlock({
|
||||
start: /(<table[^>]*>)([\S\s]*)/igm,
|
||||
stop: /<\/table>/igm,
|
||||
rawContents: true,
|
||||
priority: 1,
|
||||
// we need a custom callback for style handling
|
||||
helper.whiteList({
|
||||
custom: function(tag,attr,val) {
|
||||
if (tag !== 'th' && tag !== 'td') {
|
||||
return false;
|
||||
}
|
||||
|
||||
emitter(contents) {
|
||||
return ['table', {"class": "md-table"}, tableFlattenBlocks.apply(this, [contents])];
|
||||
if (attr !== 'style') {
|
||||
return false;
|
||||
}
|
||||
|
||||
return (val === 'text-align:right' || val === 'text-align:left' || val === 'text-align:center');
|
||||
}
|
||||
});
|
||||
|
||||
helper.whiteList([
|
||||
'table',
|
||||
'tbody',
|
||||
'thead',
|
||||
'tr',
|
||||
'th',
|
||||
'td',
|
||||
]);
|
||||
}
|
||||
|
|
|
@ -1,104 +0,0 @@
|
|||
function addHashtag(buffer, matches, state) {
|
||||
const options = state.md.options.discourse;
|
||||
const [hashtag, slug] = matches;
|
||||
const categoryHashtagLookup = options.categoryHashtagLookup;
|
||||
const result = categoryHashtagLookup && categoryHashtagLookup(slug);
|
||||
|
||||
let token;
|
||||
|
||||
if (result) {
|
||||
token = new state.Token('link_open', 'a', 1);
|
||||
token.attrs = [['class', 'hashtag'], ['href', result[0]]];
|
||||
token.block = false;
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = '#';
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('span_open', 'span', 1);
|
||||
token.block = false;
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = result[1];
|
||||
buffer.push(token);
|
||||
|
||||
buffer.push(new state.Token('span_close', 'span', -1));
|
||||
|
||||
buffer.push(new state.Token('link_close', 'a', -1));
|
||||
} else {
|
||||
|
||||
token = new state.Token('span_open', 'span', 1);
|
||||
token.attrs = [['class', 'hashtag']];
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = hashtag;
|
||||
buffer.push(token);
|
||||
|
||||
token = new state.Token('span_close', 'span', -1);
|
||||
buffer.push(token);
|
||||
}
|
||||
}
|
||||
|
||||
const REGEX = /#([\w-:]{1,101})/gi;
|
||||
|
||||
function allowedBoundary(content, index, utils) {
|
||||
let code = content.charCodeAt(index);
|
||||
return (utils.isWhiteSpace(code) || utils.isPunctChar(String.fromCharCode(code)));
|
||||
}
|
||||
|
||||
function applyHashtag(content, state) {
|
||||
let result = null,
|
||||
match,
|
||||
pos = 0;
|
||||
|
||||
while (match = REGEX.exec(content)) {
|
||||
// check boundary
|
||||
if (match.index > 0) {
|
||||
if (!allowedBoundary(content, match.index-1, state.md.utils)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// check forward boundary as well
|
||||
if (match.index + match[0].length < content.length) {
|
||||
if (!allowedBoundary(content, match.index + match[0].length, state.md.utils)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (match.index > pos) {
|
||||
result = result || [];
|
||||
let token = new state.Token('text', '', 0);
|
||||
token.content = content.slice(pos, match.index);
|
||||
result.push(token);
|
||||
}
|
||||
|
||||
result = result || [];
|
||||
addHashtag(result, match, state);
|
||||
|
||||
pos = match.index + match[0].length;
|
||||
}
|
||||
|
||||
if (result && pos < content.length) {
|
||||
let token = new state.Token('text', '', 0);
|
||||
token.content = content.slice(pos);
|
||||
result.push(token);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.registerPlugin(md=>{
|
||||
|
||||
md.core.ruler.push('category-hashtag', state => md.options.discourse.helpers.textReplace(
|
||||
state, applyHashtag, true /* skip all links */
|
||||
));
|
||||
});
|
||||
}
|
|
@ -1,44 +0,0 @@
|
|||
import { censorFn } from 'pretty-text/censored-words';
|
||||
|
||||
function recurse(tokens, apply) {
|
||||
let i;
|
||||
for(i=0;i<tokens.length;i++) {
|
||||
apply(tokens[i]);
|
||||
if (tokens[i].children) {
|
||||
recurse(tokens[i].children, apply);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function censorTree(state, censor) {
|
||||
if (!state.tokens) {
|
||||
return;
|
||||
}
|
||||
|
||||
recurse(state.tokens, token => {
|
||||
if (token.content) {
|
||||
token.content = censor(token.content);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.registerOptions((opts, siteSettings) => {
|
||||
opts.censoredWords = siteSettings.censored_words;
|
||||
opts.censoredPattern = siteSettings.censored_pattern;
|
||||
});
|
||||
|
||||
helper.registerPlugin(md => {
|
||||
const words = md.options.discourse.censoredWords;
|
||||
const patterns = md.options.discourse.censoredPattern;
|
||||
|
||||
if ((words && words.length > 0) || (patterns && patterns.length > 0)) {
|
||||
const replacement = String.fromCharCode(9632);
|
||||
const censor = censorFn(words, patterns, replacement);
|
||||
md.core.ruler.push('censored', state => censorTree(state, censor));
|
||||
}
|
||||
});
|
||||
}
|
|
@ -1,51 +0,0 @@
|
|||
// we need a custom renderer for code blocks cause we have a slightly non compliant
|
||||
// format with special handling for text and so on
|
||||
|
||||
const TEXT_CODE_CLASSES = ["text", "pre", "plain"];
|
||||
|
||||
|
||||
function render(tokens, idx, options, env, slf, md) {
|
||||
let token = tokens[idx],
|
||||
info = token.info ? md.utils.unescapeAll(token.info) : '',
|
||||
langName = md.options.discourse.defaultCodeLang,
|
||||
className,
|
||||
escapedContent = md.utils.escapeHtml(token.content);
|
||||
|
||||
if (info) {
|
||||
// strip off any additional languages
|
||||
info = info.split(/\s+/g)[0];
|
||||
}
|
||||
|
||||
const acceptableCodeClasses = md.options.discourse.acceptableCodeClasses;
|
||||
if (acceptableCodeClasses && info && acceptableCodeClasses.indexOf(info) !== -1) {
|
||||
langName = info;
|
||||
}
|
||||
|
||||
className = TEXT_CODE_CLASSES.indexOf(info) !== -1 ? 'lang-nohighlight' : 'lang-' + langName;
|
||||
|
||||
return `<pre><code class='${className}'>${escapedContent}</code></pre>\n`;
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.registerOptions((opts, siteSettings) => {
|
||||
opts.defaultCodeLang = siteSettings.default_code_lang;
|
||||
opts.acceptableCodeClasses = (siteSettings.highlighted_languages || "").split("|").concat(['auto', 'nohighlight']);
|
||||
});
|
||||
|
||||
helper.whiteList({
|
||||
custom(tag, name, value) {
|
||||
if (tag === 'code' && name === 'class') {
|
||||
const m = /^lang\-(.+)$/.exec(value);
|
||||
if (m) {
|
||||
return helper.getOptions().acceptableCodeClasses.indexOf(m[1]) !== -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
helper.registerPlugin(md=>{
|
||||
md.renderer.rules.fence = (tokens,idx,options,env,slf)=>render(tokens,idx,options,env,slf,md);
|
||||
});
|
||||
}
|
|
@ -1,246 +0,0 @@
|
|||
import { buildEmojiUrl, isCustomEmoji } from 'pretty-text/emoji';
|
||||
import { translations } from 'pretty-text/emoji/data';
|
||||
|
||||
const MAX_NAME_LENGTH = 60;
|
||||
|
||||
let translationTree = null;
|
||||
|
||||
// This allows us to efficiently search for aliases
|
||||
// We build a data structure that allows us to quickly
|
||||
// search through our N next chars to see if any match
|
||||
// one of our alias emojis.
|
||||
//
|
||||
function buildTranslationTree() {
|
||||
let tree = [];
|
||||
let lastNode;
|
||||
|
||||
Object.keys(translations).forEach(function(key){
|
||||
let i;
|
||||
let node = tree;
|
||||
|
||||
for(i=0;i<key.length;i++) {
|
||||
let code = key.charCodeAt(i);
|
||||
let j;
|
||||
|
||||
let found = false;
|
||||
|
||||
for (j=0;j<node.length;j++){
|
||||
if (node[j][0] === code) {
|
||||
node = node[j][1];
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
// token, children, value
|
||||
let tmp = [code, []];
|
||||
node.push(tmp);
|
||||
lastNode = tmp;
|
||||
node = tmp[1];
|
||||
}
|
||||
}
|
||||
|
||||
lastNode[1] = translations[key];
|
||||
});
|
||||
|
||||
return tree;
|
||||
}
|
||||
|
||||
|
||||
function imageFor(code, opts) {
|
||||
code = code.toLowerCase();
|
||||
const url = buildEmojiUrl(code, opts);
|
||||
if (url) {
|
||||
const title = `:${code}:`;
|
||||
const classes = isCustomEmoji(code, opts) ? "emoji emoji-custom" : "emoji";
|
||||
return {url, title, classes};
|
||||
}
|
||||
}
|
||||
|
||||
function getEmojiName(content, pos, state) {
|
||||
|
||||
if (content.charCodeAt(pos) !== 58) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (pos > 0) {
|
||||
let prev = content.charCodeAt(pos-1);
|
||||
if (!state.md.utils.isSpace(prev) && !state.md.utils.isPunctChar(String.fromCharCode(prev))) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
pos++;
|
||||
if (content.charCodeAt(pos) === 58) {
|
||||
return;
|
||||
}
|
||||
|
||||
let length = 0;
|
||||
while(length < MAX_NAME_LENGTH) {
|
||||
length++;
|
||||
|
||||
if (content.charCodeAt(pos+length) === 58) {
|
||||
// check for t2-t6
|
||||
if (content.substr(pos+length+1, 3).match(/t[2-6]:/)) {
|
||||
length += 3;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (pos+length > content.length) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (length === MAX_NAME_LENGTH) {
|
||||
return;
|
||||
}
|
||||
|
||||
return content.substr(pos, length);
|
||||
}
|
||||
|
||||
// straight forward :smile: to emoji image
|
||||
function getEmojiTokenByName(name, state) {
|
||||
|
||||
let info;
|
||||
if (info = imageFor(name, state.md.options.discourse)) {
|
||||
let token = new state.Token('emoji', 'img', 0);
|
||||
token.attrs = [['src', info.url],
|
||||
['title', info.title],
|
||||
['class', info.classes],
|
||||
['alt', info.title]];
|
||||
|
||||
return token;
|
||||
}
|
||||
}
|
||||
|
||||
function getEmojiTokenByTranslation(content, pos, state) {
|
||||
|
||||
translationTree = translationTree || buildTranslationTree();
|
||||
|
||||
let currentTree = translationTree;
|
||||
|
||||
let i;
|
||||
let search = true;
|
||||
let found = false;
|
||||
let start = pos;
|
||||
|
||||
while(search) {
|
||||
|
||||
search = false;
|
||||
let code = content.charCodeAt(pos);
|
||||
|
||||
for (i=0;i<currentTree.length;i++) {
|
||||
if(currentTree[i][0] === code) {
|
||||
currentTree = currentTree[i][1];
|
||||
pos++;
|
||||
search = true;
|
||||
if (typeof currentTree === "string") {
|
||||
found = currentTree;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
return;
|
||||
}
|
||||
|
||||
// quick boundary check
|
||||
if (start > 0) {
|
||||
let leading = content.charAt(start-1);
|
||||
if (!state.md.utils.isSpace(leading.charCodeAt(0)) && !state.md.utils.isPunctChar(leading)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// check trailing for punct or space
|
||||
if (pos < content.length) {
|
||||
let trailing = content.charCodeAt(pos);
|
||||
if (!state.md.utils.isSpace(trailing)){
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let token = getEmojiTokenByName(found, state);
|
||||
if (token) {
|
||||
return { pos, token };
|
||||
}
|
||||
}
|
||||
|
||||
function applyEmoji(content, state, emojiUnicodeReplacer) {
|
||||
let i;
|
||||
let result = null;
|
||||
let contentToken = null;
|
||||
|
||||
let start = 0;
|
||||
|
||||
if (emojiUnicodeReplacer) {
|
||||
content = emojiUnicodeReplacer(content);
|
||||
}
|
||||
|
||||
let endToken = content.length;
|
||||
|
||||
for (i=0; i<content.length-1; i++) {
|
||||
let offset = 0;
|
||||
let emojiName = getEmojiName(content,i,state);
|
||||
let token = null;
|
||||
|
||||
if (emojiName) {
|
||||
token = getEmojiTokenByName(emojiName, state);
|
||||
if (token) {
|
||||
offset = emojiName.length+2;
|
||||
}
|
||||
}
|
||||
|
||||
if (!token) {
|
||||
// handle aliases (note: we can't do this in inline cause ; is not a split point)
|
||||
//
|
||||
let info = getEmojiTokenByTranslation(content, i, state);
|
||||
|
||||
if (info) {
|
||||
offset = info.pos - i;
|
||||
token = info.token;
|
||||
}
|
||||
}
|
||||
|
||||
if (token) {
|
||||
result = result || [];
|
||||
if (i-start>0) {
|
||||
contentToken = new state.Token('text', '', 0);
|
||||
contentToken.content = content.slice(start,i);
|
||||
result.push(contentToken);
|
||||
}
|
||||
|
||||
result.push(token);
|
||||
endToken = start = i + offset;
|
||||
}
|
||||
}
|
||||
|
||||
if (endToken < content.length) {
|
||||
contentToken = new state.Token('text', '', 0);
|
||||
contentToken.content = content.slice(endToken);
|
||||
result.push(contentToken);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.registerOptions((opts, siteSettings, state)=>{
|
||||
opts.features.emoji = !!siteSettings.enable_emoji;
|
||||
opts.emojiSet = siteSettings.emoji_set || "";
|
||||
opts.customEmoji = state.customEmoji;
|
||||
});
|
||||
|
||||
helper.registerPlugin((md)=>{
|
||||
md.core.ruler.push('emoji', state => md.options.discourse.helpers.textReplace(
|
||||
state, (c,s)=>applyEmoji(c,s,md.options.discourse.emojiUnicodeReplacer))
|
||||
);
|
||||
});
|
||||
}
|
|
@ -1,88 +0,0 @@
|
|||
const regex = /^(\w[\w.-]{0,59})\b/i;
|
||||
|
||||
function applyMentions(state, silent, isWhiteSpace, isPunctChar, mentionLookup, getURL) {
|
||||
|
||||
let pos = state.pos;
|
||||
|
||||
// 64 = @
|
||||
if (silent || state.src.charCodeAt(pos) !== 64) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pos > 0) {
|
||||
let prev = state.src.charCodeAt(pos-1);
|
||||
if (!isWhiteSpace(prev) && !isPunctChar(String.fromCharCode(prev))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// skip if in a link
|
||||
if (state.tokens) {
|
||||
let last = state.tokens[state.tokens.length-1];
|
||||
if (last) {
|
||||
if (last.type === 'link_open') {
|
||||
return false;
|
||||
}
|
||||
if (last.type === 'html_inline' && last.content.substr(0,2) === "<a") {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let maxMention = state.src.substr(pos+1, 60);
|
||||
|
||||
let matches = maxMention.match(regex);
|
||||
|
||||
if (!matches) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let username = matches[1];
|
||||
|
||||
let type = mentionLookup && mentionLookup(username);
|
||||
|
||||
let tag = 'a';
|
||||
let className = 'mention';
|
||||
let href = null;
|
||||
|
||||
if (type === 'user') {
|
||||
href = getURL('/u/') + username.toLowerCase();
|
||||
} else if (type === 'group') {
|
||||
href = getURL('/groups/') + username;
|
||||
className = 'mention-group';
|
||||
} else {
|
||||
tag = 'span';
|
||||
}
|
||||
|
||||
let token = state.push('mention_open', tag, 1);
|
||||
token.attrs = [['class', className]];
|
||||
if (href) {
|
||||
token.attrs.push(['href', href]);
|
||||
}
|
||||
|
||||
token = state.push('text', '', 0);
|
||||
token.content = '@'+username;
|
||||
|
||||
state.push('mention_close', tag, -1);
|
||||
|
||||
state.pos = pos + username.length + 1;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.registerPlugin(md => {
|
||||
md.inline.ruler.push('mentions', (state,silent)=> applyMentions(
|
||||
state,
|
||||
silent,
|
||||
md.utils.isWhiteSpace,
|
||||
md.utils.isPunctChar,
|
||||
md.options.discourse.mentionLookup,
|
||||
md.options.discourse.getURL
|
||||
));
|
||||
});
|
||||
}
|
||||
|
|
@ -1,53 +0,0 @@
|
|||
// see: https://github.com/markdown-it/markdown-it/issues/375
|
||||
//
|
||||
// we use a custom paragraph rule cause we have to signal when a
|
||||
// link starts with a space, so we can bypass a onebox
|
||||
// this is a freedom patch, so careful, may break on updates
|
||||
|
||||
|
||||
function newline(state, silent) {
|
||||
var token, pmax, max, pos = state.pos;
|
||||
|
||||
if (state.src.charCodeAt(pos) !== 0x0A/* \n */) { return false; }
|
||||
|
||||
pmax = state.pending.length - 1;
|
||||
max = state.posMax;
|
||||
|
||||
// ' \n' -> hardbreak
|
||||
// Lookup in pending chars is bad practice! Don't copy to other rules!
|
||||
// Pending string is stored in concat mode, indexed lookups will cause
|
||||
// convertion to flat mode.
|
||||
if (!silent) {
|
||||
if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) {
|
||||
if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) {
|
||||
state.pending = state.pending.replace(/ +$/, '');
|
||||
token = state.push('hardbreak', 'br', 0);
|
||||
} else {
|
||||
state.pending = state.pending.slice(0, -1);
|
||||
token = state.push('softbreak', 'br', 0);
|
||||
}
|
||||
|
||||
} else {
|
||||
token = state.push('softbreak', 'br', 0);
|
||||
}
|
||||
}
|
||||
|
||||
pos++;
|
||||
|
||||
// skip heading spaces for next line
|
||||
while (pos < max && state.md.utils.isSpace(state.src.charCodeAt(pos))) {
|
||||
if (token) {
|
||||
token.leading_space = true;
|
||||
}
|
||||
pos++;
|
||||
}
|
||||
|
||||
state.pos = pos;
|
||||
return true;
|
||||
};
|
||||
|
||||
export function setup(helper) {
|
||||
helper.registerPlugin(md => {
|
||||
md.inline.ruler.at('newline', newline);
|
||||
});
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
import { lookupCache } from 'pretty-text/oneboxer';
|
||||
|
||||
function applyOnebox(state, silent) {
|
||||
if (silent || !state.tokens || state.tokens.length < 3) {
|
||||
return;
|
||||
}
|
||||
|
||||
let i;
|
||||
for(i=1;i<state.tokens.length;i++) {
|
||||
let token = state.tokens[i];
|
||||
|
||||
let prev = state.tokens[i-1];
|
||||
let prevAccepted = prev.type === "paragraph_open" && prev.level === 0;
|
||||
|
||||
if (token.type === "inline" && prevAccepted) {
|
||||
let j;
|
||||
for(j=0;j<token.children.length;j++){
|
||||
let child = token.children[j];
|
||||
|
||||
if (child.type === "link_open" && child.markup === 'linkify' && child.info === 'auto') {
|
||||
|
||||
if (j === 0 && token.leading_space) {
|
||||
continue;
|
||||
} else if (j > 0) {
|
||||
let prevSibling = token.children[j-1];
|
||||
|
||||
if (prevSibling.tag !== 'br' || prevSibling.leading_space) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// look ahead for soft or hard break
|
||||
let text = token.children[j+1];
|
||||
let close = token.children[j+2];
|
||||
let lookahead = token.children[j+3];
|
||||
|
||||
if (lookahead && lookahead.tag !== 'br') {
|
||||
continue;
|
||||
}
|
||||
|
||||
// check attrs only include a href
|
||||
let attrs = child["attrs"];
|
||||
|
||||
if (!attrs || attrs.length !== 1 || attrs[0][0] !== "href") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// we already know text matches cause it is an auto link
|
||||
|
||||
if (!close || close.type !== "link_close") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// we already determined earlier that 0 0 was href
|
||||
let cached = lookupCache(attrs[0][1]);
|
||||
|
||||
if (cached) {
|
||||
// replace link with 2 blank text nodes and inline html for onebox
|
||||
child.type = 'html_raw';
|
||||
child.content = cached;
|
||||
child.inline = true;
|
||||
|
||||
text.type = 'html_raw';
|
||||
text.content = '';
|
||||
text.inline = true;
|
||||
|
||||
close.type = 'html_raw';
|
||||
close.content = '';
|
||||
close.inline = true;
|
||||
|
||||
} else {
|
||||
// decorate...
|
||||
attrs.push(["class", "onebox"]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export function setup(helper) {
|
||||
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
helper.registerPlugin(md => {
|
||||
md.core.ruler.after('linkify', 'onebox', applyOnebox);
|
||||
});
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
export function setup(helper) {
|
||||
|
||||
if (!helper.markdownIt) { return; }
|
||||
|
||||
// this is built in now
|
||||
// TODO: sanitizer needs fixing, does not properly support this yet
|
||||
|
||||
// we need a custom callback for style handling
|
||||
helper.whiteList({
|
||||
custom: function(tag,attr,val) {
|
||||
if (tag !== 'th' && tag !== 'td') {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (attr !== 'style') {
|
||||
return false;
|
||||
}
|
||||
|
||||
return (val === 'text-align:right' || val === 'text-align:left' || val === 'text-align:center');
|
||||
}
|
||||
});
|
||||
|
||||
helper.whiteList([
|
||||
'table',
|
||||
'tbody',
|
||||
'thead',
|
||||
'tr',
|
||||
'th',
|
||||
'td',
|
||||
]);
|
||||
}
|
|
@ -1,13 +1,10 @@
|
|||
import { cook, setup } from 'pretty-text/engines/discourse-markdown';
|
||||
import { cook as cookIt, setup as setupIt } from 'pretty-text/engines/discourse-markdown-it';
|
||||
import { sanitize } from 'pretty-text/sanitizer';
|
||||
import WhiteLister from 'pretty-text/white-lister';
|
||||
|
||||
const _registerFns = [];
|
||||
const identity = value => value;
|
||||
|
||||
export function registerOption(fn) {
|
||||
_registerFns.push(fn);
|
||||
export function registerOption() {
|
||||
// TODO next major version deprecate this
|
||||
// if (window.console) {
|
||||
// window.console.log("registerOption is deprecated");
|
||||
// }
|
||||
}
|
||||
|
||||
export function buildOptions(state) {
|
||||
|
@ -25,11 +22,7 @@ export function buildOptions(state) {
|
|||
emojiUnicodeReplacer
|
||||
} = state;
|
||||
|
||||
if (!siteSettings.enable_experimental_markdown_it) {
|
||||
setup();
|
||||
}
|
||||
|
||||
const features = {
|
||||
let features = {
|
||||
'bold-italics': true,
|
||||
'auto-link': true,
|
||||
'mentions': true,
|
||||
|
@ -41,6 +34,10 @@ export function buildOptions(state) {
|
|||
'newline': !siteSettings.traditional_markdown_linebreaks
|
||||
};
|
||||
|
||||
if (state.features) {
|
||||
features = _.merge(features, state.features);
|
||||
}
|
||||
|
||||
const options = {
|
||||
sanitize: true,
|
||||
getURL,
|
||||
|
@ -56,44 +53,37 @@ export function buildOptions(state) {
|
|||
mentionLookup: state.mentionLookup,
|
||||
emojiUnicodeReplacer,
|
||||
allowedHrefSchemes: siteSettings.allowed_href_schemes ? siteSettings.allowed_href_schemes.split('|') : null,
|
||||
markdownIt: siteSettings.enable_experimental_markdown_it
|
||||
markdownIt: true
|
||||
};
|
||||
|
||||
if (siteSettings.enable_experimental_markdown_it) {
|
||||
setupIt(options, siteSettings, state);
|
||||
} else {
|
||||
// TODO deprecate this
|
||||
_registerFns.forEach(fn => fn(siteSettings, options, state));
|
||||
}
|
||||
// note, this will mutate options due to the way the API is designed
|
||||
// may need a refactor
|
||||
setupIt(options, siteSettings, state);
|
||||
|
||||
return options;
|
||||
}
|
||||
|
||||
export default class {
|
||||
constructor(opts) {
|
||||
this.opts = opts || {};
|
||||
this.opts.features = this.opts.features || {};
|
||||
this.opts.sanitizer = (!!this.opts.sanitize) ? (this.opts.sanitizer || sanitize) : identity;
|
||||
// We used to do a failsafe call to setup here
|
||||
// under new engine we always expect setup to be called by buildOptions.
|
||||
// setup();
|
||||
if (!opts) {
|
||||
opts = buildOptions({ siteSettings: {}});
|
||||
}
|
||||
this.opts = opts;
|
||||
}
|
||||
|
||||
disableSanitizer() {
|
||||
this.opts.sanitizer = this.opts.discourse.sanitizer = ident => ident;
|
||||
}
|
||||
|
||||
cook(raw) {
|
||||
if (!raw || raw.length === 0) { return ""; }
|
||||
|
||||
let result;
|
||||
|
||||
if (this.opts.markdownIt) {
|
||||
result = cookIt(raw, this.opts);
|
||||
} else {
|
||||
result = cook(raw, this.opts);
|
||||
}
|
||||
|
||||
result = cookIt(raw, this.opts);
|
||||
return result ? result : "";
|
||||
}
|
||||
|
||||
sanitize(html) {
|
||||
return this.opts.sanitizer(html, new WhiteLister(this.opts));
|
||||
return this.opts.sanitizer(html).trim();
|
||||
}
|
||||
};
|
||||
|
|
|
@ -1,115 +1,112 @@
|
|||
const masterList = {};
|
||||
const masterCallbacks = {};
|
||||
|
||||
const _whiteLists = {};
|
||||
const _callbacks = {};
|
||||
|
||||
function concatUniq(src, elems) {
|
||||
src = src || [];
|
||||
if (!Array.isArray(elems)) {
|
||||
elems = [elems];
|
||||
}
|
||||
return src.concat(elems.filter(e => src.indexOf(e) === -1));
|
||||
}
|
||||
// to match:
|
||||
// abcd
|
||||
// abcd[test]
|
||||
// abcd[test=bob]
|
||||
const WHITELIST_REGEX = /([^\[]+)(\[([^=]+)(=(.*))?\])?/;
|
||||
|
||||
export default class WhiteLister {
|
||||
constructor(options) {
|
||||
options = options || {
|
||||
features: {
|
||||
default: true
|
||||
}
|
||||
};
|
||||
|
||||
options.features.default = true;
|
||||
this._enabled = { "default": true };
|
||||
this._allowedHrefSchemes = (options && options.allowedHrefSchemes) || [];
|
||||
this._rawFeatures = [["default", DEFAULT_LIST]];
|
||||
|
||||
this._featureKeys = Object.keys(options.features).filter(f => options.features[f]);
|
||||
this._key = this._featureKeys.join(':');
|
||||
this._features = options.features;
|
||||
this._options = options;
|
||||
this._cache = null;
|
||||
|
||||
if (options && options.features) {
|
||||
Object.keys(options.features).forEach(f => {
|
||||
if (options.features[f]) {
|
||||
this._enabled[f] = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
getCustom() {
|
||||
if (!_callbacks[this._key]) {
|
||||
const callbacks = [];
|
||||
this._featureKeys.forEach(f => {
|
||||
(masterCallbacks[f] || []).forEach(cb => callbacks.push(cb));
|
||||
});
|
||||
_callbacks[this._key] = callbacks;
|
||||
}
|
||||
whiteListFeature(feature, info) {
|
||||
this._rawFeatures.push([feature, info]);
|
||||
}
|
||||
|
||||
return _callbacks[this._key];
|
||||
disable(feature) {
|
||||
this._enabled[feature] = false;
|
||||
this._cache = null;
|
||||
}
|
||||
|
||||
enable(feature) {
|
||||
this._enabled[feature] = true;
|
||||
this._cache = null;
|
||||
}
|
||||
|
||||
_buildCache() {
|
||||
const tagList = {};
|
||||
const attrList = {};
|
||||
const custom = [];
|
||||
|
||||
this._rawFeatures.forEach(([name, info]) => {
|
||||
if (!this._enabled[name]) return;
|
||||
|
||||
if (info.custom) {
|
||||
custom.push(info.custom);
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof info === "string") {
|
||||
info = [info];
|
||||
}
|
||||
|
||||
(info || []).forEach(tag => {
|
||||
const classes = tag.split('.');
|
||||
const tagWithAttr = classes.shift();
|
||||
|
||||
const m = WHITELIST_REGEX.exec(tagWithAttr);
|
||||
if (m) {
|
||||
|
||||
const [,tagname,,attr,,val] = m;
|
||||
tagList[tagname] = [];
|
||||
|
||||
let attrs = attrList[tagname] = attrList[tagname] || {};
|
||||
if (classes.length > 0) {
|
||||
attrs["class"] = (attrs["class"] || []).concat(classes);
|
||||
}
|
||||
|
||||
if (attr) {
|
||||
let attrInfo = attrs[attr] = attrs[attr] || [];
|
||||
|
||||
if (val) {
|
||||
attrInfo.push(val);
|
||||
} else {
|
||||
attrs[attr] = ["*"];
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
this._cache = {custom, whiteList: {tagList, attrList}};
|
||||
}
|
||||
|
||||
_ensureCache() {
|
||||
if (!this._cache) { this._buildCache(); }
|
||||
}
|
||||
|
||||
getWhiteList() {
|
||||
if (!_whiteLists[this._key]) {
|
||||
const tagList = {};
|
||||
let attrList = {};
|
||||
this._ensureCache();
|
||||
return this._cache.whiteList;
|
||||
}
|
||||
|
||||
// merge whitelists for these features
|
||||
this._featureKeys.forEach(f => {
|
||||
const info = masterList[f] || {};
|
||||
Object.keys(info).forEach(t => {
|
||||
tagList[t] = [];
|
||||
attrList[t] = attrList[t] || {};
|
||||
|
||||
const attrs = info[t];
|
||||
Object.keys(attrs).forEach(a => attrList[t][a] = concatUniq(attrList[t][a], attrs[a]));
|
||||
});
|
||||
});
|
||||
|
||||
_whiteLists[this._key] = { tagList, attrList };
|
||||
}
|
||||
return _whiteLists[this._key];
|
||||
getCustom() {
|
||||
this._ensureCache();
|
||||
return this._cache.custom;
|
||||
}
|
||||
|
||||
getAllowedHrefSchemes() {
|
||||
return this._options.allowedHrefSchemes || [];
|
||||
return this._allowedHrefSchemes;
|
||||
}
|
||||
}
|
||||
|
||||
// Builds our object that represents whether something is sanitized for a particular feature.
|
||||
export function whiteListFeature(feature, info) {
|
||||
const featureInfo = {};
|
||||
|
||||
// we can supply a callback instead
|
||||
if (info.custom) {
|
||||
masterCallbacks[feature] = masterCallbacks[feature] || [];
|
||||
masterCallbacks[feature].push(info.custom);
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof info === "string") { info = [info]; }
|
||||
|
||||
(info || []).forEach(tag => {
|
||||
const classes = tag.split('.');
|
||||
const tagName = classes.shift();
|
||||
const m = /\[([^\]]+)]/.exec(tagName);
|
||||
if (m) {
|
||||
const [full, inside] = m;
|
||||
const stripped = tagName.replace(full, '');
|
||||
const vals = inside.split('=');
|
||||
|
||||
featureInfo[stripped] = featureInfo[stripped] || {};
|
||||
if (vals.length === 2) {
|
||||
const [name, value] = vals;
|
||||
featureInfo[stripped][name] = value;
|
||||
} else {
|
||||
featureInfo[stripped][inside] = '*';
|
||||
}
|
||||
}
|
||||
|
||||
featureInfo[tagName] = featureInfo[tagName] || {};
|
||||
if (classes.length) {
|
||||
featureInfo[tagName]['class'] = concatUniq(featureInfo[tagName]['class'], classes);
|
||||
}
|
||||
});
|
||||
|
||||
masterList[feature] = featureInfo;
|
||||
}
|
||||
|
||||
// Only add to `default` when you always want your whitelist to occur. In other words,
|
||||
// don't change this for a plugin or a feature that can be disabled
|
||||
whiteListFeature('default', [
|
||||
const DEFAULT_LIST = [
|
||||
'a.attachment',
|
||||
'a.hashtag',
|
||||
'a.mention',
|
||||
|
@ -174,4 +171,4 @@ whiteListFeature('default', [
|
|||
'sub',
|
||||
'sup',
|
||||
'ul',
|
||||
]);
|
||||
];
|
||||
|
|
|
@ -42,9 +42,7 @@
|
|||
Discourse.Environment = '<%= Rails.env %>';
|
||||
Discourse.SiteSettings = ps.get('siteSettings');
|
||||
Discourse.LetterAvatarVersion = '<%= LetterAvatar.version %>';
|
||||
<%- if SiteSetting.enable_experimental_markdown_it %>
|
||||
Discourse.MarkdownItURL = '<%= asset_url('markdown-it-bundle.js') %>';
|
||||
<%- end %>
|
||||
I18n.defaultLocale = '<%= SiteSetting.default_locale %>';
|
||||
Discourse.start();
|
||||
Discourse.set('assetVersion','<%= Discourse.assets_digest %>');
|
||||
|
|
|
@ -946,7 +946,6 @@ ar:
|
|||
notify_mods_when_user_blocked: "إذا تم حظر المستخدم تلقائيا، وإرسال رسالة الى جميع المشرفين."
|
||||
flag_sockpuppets: "إذا رد أحد المستخدمين جديد إلى موضوع من عنوان IP نفسه باسم المستخدم الجديد الذي بدأ هذا الموضوع، علم كل من مناصبهم كدعاية المحتملين."
|
||||
traditional_markdown_linebreaks: "استعمل السطور التالفه التقليديه في Markdown, التي تتطلب مساحتين بيضاوين للسطور التالفه"
|
||||
allow_html_tables: "كل الجداول يجب ان تدخل ب لغة ال HTML مثال TABLE , THEAD , TD , TR , TH سوف يأوذن لهم ( تتطلب مراجعة لكل المقالات القديمة )"
|
||||
post_undo_action_window_mins: "عدد الدقائق التي يسمح فيها للأعضاء بالتراجع عن آخر إجراءاتهم على المنشور (إعجاب، اشارة، إلخ...)"
|
||||
must_approve_users: "يجب أن الموظفين يوافق على جميع حسابات المستخدم الجديدة قبل أن يتم السماح لهم للوصول إلى الموقع. تحذير: تمكين هذا لموقع الحية إلغاء وصول المستخدمين الحاليين غير الموظفين!"
|
||||
pending_users_reminder_delay: "نبه المشرفين إذا وجد اعضاء ينتظرون الموافقة لمدة اطول من الساعات ، قم بوضع الخيار -1 لايقاف التنبيهات ."
|
||||
|
|
|
@ -903,7 +903,6 @@ da:
|
|||
notify_mods_when_user_blocked: "Send en besked til alle moderatorer hvis en bruger blokeres automatisk"
|
||||
flag_sockpuppets: "Hvis en ny bruger svarer på et emne fra den samme IP adresse som den der startede emnet, så rapporter begge at deres indlæg potentielt er spam."
|
||||
traditional_markdown_linebreaks: "Brug traditionelle linjeskift i Markdown, som kræver 2 mellemrum i slutningen af sætningen."
|
||||
allow_html_tables: "Tillad tabeller at blive oprettet i Markdown med brug af HTML tags. TABLE, THEAD, TD, TR, TH vil blive tilladt (kræver en fuld re-indeksering af gamle indlæg som benytter tabeller) "
|
||||
post_undo_action_window_mins: "Antal minutter som brugere er tilladt at fortryde handlinger på et indlæg (like, flag, etc)."
|
||||
must_approve_users: "Personale skal godkende alle nye bruger konti inden de kan tilgå sitet. ADVARSEL: aktivering af dette for et live site vil medføre en ophævning af adgang for eksisterende ikke-personale brugere."
|
||||
pending_users_reminder_delay: "Underret moderatorer hvis nye brugere har ventet på godkendelse i længere end så mange timer. Skriv -1 for at deaktivere notifikationer."
|
||||
|
|
|
@ -921,7 +921,6 @@ de:
|
|||
notify_mods_when_user_blocked: "Wenn ein Benutzer automatisch gesperrt wird, sende eine Nachricht an alle Moderatoren."
|
||||
flag_sockpuppets: "Wenn ein neuer Benutzer auf ein Thema antwortet, das von einem anderen neuen Benutzer aber mit der gleichen IP-Adresse begonnen wurde, markiere beide Beiträge als potenziellen Spam."
|
||||
traditional_markdown_linebreaks: "Traditionelle Zeilenumbrüche in Markdown, die zwei nachfolgende Leerzeichen für einen Zeilenumbruch benötigen."
|
||||
allow_html_tables: "Erlaube es, Tabellen in Markdown mit HTML-Tags einzugeben. TABLE, THEAD, TD, TR, TH werden erlaubt (alle Beiträge mit Tabellen müssen ihr HTML erneuern)"
|
||||
post_undo_action_window_mins: "Minuten, die ein Benutzer hat, um Aktionen auf einen Beitrag rückgängig zu machen (Gefällt mir, Meldung, usw.)."
|
||||
must_approve_users: "Team-Mitglieder müssen alle neuen Benutzerkonten freischalten, bevor diese Zugriff auf die Website erhalten. ACHTUNG: Das Aktivieren dieser Option für eine Live-Site entfernt den Zugriff auch für alle existierenden Benutzer außer für Team-Mitglieder!"
|
||||
pending_users_reminder_delay: "Benachrichtige die Moderatoren, falls neue Benutzer mehr als so viele Stunden auf ihre Genehmigung gewartet haben. Stelle -1 ein, um diese Benachrichtigungen zu deaktivieren."
|
||||
|
|
|
@ -890,7 +890,6 @@ el:
|
|||
notify_mods_when_user_blocked: "Εάν ένας χρήστης αυτόματα μπλοκαριστει, στείλε μήνυμα σε όλους τους συντονιστές."
|
||||
flag_sockpuppets: "Εάν ένας νέος χρήστης απαντήσει σε ένα νήμα από την ίδια διεύθυνση ΙP όπως ο νέος χρήστης, ο οποίος ξεκίνησε το νήμα, και οι δυο δημοσιεύσεις τους θα επισημανθούν ως δυνητικά ανεπιθύμητες."
|
||||
traditional_markdown_linebreaks: "Χρήση παραδοσιακών αλλαγών γραμμών στη Markdown, η οποία απαιτεί δύο κενά διαστήματα για μια αλλαγή γραμμής."
|
||||
allow_html_tables: "Αποδοχή εισδοχής πινάκων στη Markdown με τη χρήση ετικετών HTML. TABLE, THEAD, TD, TR, TH θα μπαίνουν στη λίστα επιτρεπόμενων (απαιτείται πληρής αντιγραφή σε όλες τις αναρτήσεις που περιέχουν πίνακες)"
|
||||
post_undo_action_window_mins: "Αριθμός των λεπτών όπου οι χρήστες δικαιούνται να αναιρέσουν πρόσφατες ενέργειες πάνω σε ένα θέμα (μου αρέσει, επισήμανση, κτλ) "
|
||||
must_approve_users: "Το προσωπικό πρέπει να εγκρίνει όλους τους λογαριασμούς των νέων χρηστών προτού τους επιτραπεί να έχουν πρόσβαση στην ιστοσελίδα. Προειδοποίηση: ενεργοποιώντας το για μια ζωντανή ιστοσελίδα θα έχει ως αποτέλεσμα την ανάκληση για τους υπάρχοντες χρήστες που δεν ανήκουν στο προσωπικό!"
|
||||
pending_users_reminder_delay: "Ειδοποίηση συντονιστών αν καινούργιοι χρήστες περιμένουν για αποδοχή για μεγαλύτερο απο αυτό το χρονικό διάστημα. Όρισέ το στο -1 για να απενεργοποιηθούν οι ειδοποιήσεις."
|
||||
|
|
|
@ -1026,9 +1026,7 @@ en:
|
|||
flag_sockpuppets: "If a new user replies to a topic from the same IP address as the new user who started the topic, flag both of their posts as potential spam."
|
||||
|
||||
traditional_markdown_linebreaks: "Use traditional linebreaks in Markdown, which require two trailing spaces for a linebreak."
|
||||
enable_experimental_markdown_it: "Enable the experimental markdown.it CommonMark engine, WARNING: some plugins may not work correctly"
|
||||
enable_markdown_typographer: "Use basic typography rules to improve text readability of paragraphs of text, replaces (c) (tm) etc, with symbols, reduces number of question marks and so on"
|
||||
allow_html_tables: "Allow tables to be entered in Markdown using HTML tags. TABLE, THEAD, TD, TR, TH will be whitelisted (requires full rebake on all old posts containing tables)"
|
||||
post_undo_action_window_mins: "Number of minutes users are allowed to undo recent actions on a post (like, flag, etc)."
|
||||
must_approve_users: "Staff must approve all new user accounts before they are allowed to access the site. WARNING: enabling this for a live site will revoke access for existing non-staff users!"
|
||||
pending_users_reminder_delay: "Notify moderators if new users have been waiting for approval for longer than this many hours. Set to -1 to disable notifications."
|
||||
|
|
|
@ -932,9 +932,7 @@ es:
|
|||
notify_mods_when_user_blocked: "Si un usuario es bloqueado automáticamente, enviar un mensaje a todos los moderadores."
|
||||
flag_sockpuppets: "Si un nuevo usuario responde a un tema desde la misma dirección de IP que el nuevo usuario que inició el tema, reportar los posts de los dos como spam en potencia."
|
||||
traditional_markdown_linebreaks: "Utiliza saltos de línea tradicionales en Markdown, que requieren dos espacios al final para un salto de línea."
|
||||
enable_experimental_markdown_it: "Habilitar el motor experimental CommonMark markdown.it, ADVERTENCIA: algunos plugins podrían no funcionar correctamente."
|
||||
enable_markdown_typographer: "Utilice reglas básicas de tipografía para mejorar la legibilidad de texto de los párrafos de texto, reemplaza (c) (tm) etc, con símbolos, reduce el número de signos de interrogación y así sucesivamente"
|
||||
allow_html_tables: "Permitir la inserción de tablas en Markdown usando etiquetas HTML. Se permitirá usar TABLE, THEAD, TD, TR o TH (requiere un rebake completo para los posts antiguos que contengan tablas)"
|
||||
post_undo_action_window_mins: "Número de minutos durante los cuales los usuarios pueden deshacer sus acciones recientes en un post (me gusta, reportes, etc)."
|
||||
must_approve_users: "Los miembros administración deben aprobar todas las nuevas cuentas antes de que se les permita el acceso al sitio. AVISO: ¡habilitar esta opción en un sitio activo revocará el acceso a los usuarios que no sean moderadores o admin!"
|
||||
pending_users_reminder_delay: "Notificar a los moderadores si hay nuevos usuarios que hayan estado esperando aprbación durante más estas horas. Usa -1 para desactivar estas notificaciones."
|
||||
|
|
|
@ -884,7 +884,6 @@ fa_IR:
|
|||
notify_mods_when_user_blocked: "اگر کاربر بهطور خودکار مسدود شد، به تمام مدیران پیام بفرست."
|
||||
flag_sockpuppets: "اگر کاربری به موضوع با ای پی برابر با کاربری که نوشته را شروع کرده ٬ آنها را به عنوان هرزنامه پرچم گزاری کن."
|
||||
traditional_markdown_linebreaks: "در مدلهای نشانه گزاری از خط جدید سنتی استفاده کن، که برای linebreak نیاز به دو فضای انتهایی دارد ."
|
||||
allow_html_tables: "اجازه ارسال جدول به صورت markdown با تگ های HTML. TABLE, THEAD, TD, TR, TH قابل استفاده هستند. (نیازمند ایجا دوباره در نوشتههای قدیمی که شامل جدول هستند)"
|
||||
post_undo_action_window_mins: "تعداد دقایقی که کاربران اجازه دارند اقدامی را که در نوشته انجام داده اند باز گردانند. (پسند، پرچم گذاری، چیزهای دیگر)."
|
||||
must_approve_users: "همکاران باید تمامی حسابهای کاربری را قبل از اجازه دسترسی به سایت تایید کنند. اخطار: فعالسازی این گزینه ممکن است باعث جلوگیری از دسترسی کاربرانی که قبلا عضو شدهاند نیز بشود!"
|
||||
pending_users_reminder_delay: "اگر کاربرها بیشتر از این مقدار ساعت منتظر تایید بودند به مدیران اعلام کن. مقدار -1 برای غیرفعالسازی."
|
||||
|
|
|
@ -931,9 +931,7 @@ fi:
|
|||
notify_mods_when_user_blocked: "Jos käyttäjä estetään automaattisesti, lähetä viesti kaikille valvojille."
|
||||
flag_sockpuppets: "Jos uuden käyttäjän luomaan ketjuun vastaa toinen uusi käyttäjä samasta IP-osoitteesta, liputa molemmat viestit mahdolliseksi roskapostiksi."
|
||||
traditional_markdown_linebreaks: "Käytä perinteisiä rivinvaihtoja Markdownissa, joka vaatii kaksi perättäistä välilyöntiä rivin vaihtoon."
|
||||
enable_experimental_markdown_it: "Ota käyttöön kokeellinen markdown.it Commonmark ohjelmistomoottori. VAROITUS: jotkut lisäosat voivat lakata toimimasta oikein"
|
||||
enable_markdown_typographer: "Käytetään tavanomaisia typografisia sääntöjä parantamaan tekstikappaleiden luettavuutta, (c), (tm) ym. korvataan symboleilla, kysymysmerkkien määrää vähennetään jne."
|
||||
allow_html_tables: "Salli taulukoiden syöttäminen Markdowniin käyttäen HTML tageja. TABLE, THEAD, TD, TR, TH valkolistataan (edellyttää kaikkien taulukoita sisältävien vanhojen viestien uudelleen rakentamisen)"
|
||||
post_undo_action_window_mins: "Kuinka monta minuuttia käyttäjällä on aikaa perua viestiin kohdistuva toimi (tykkäys, liputus, etc)."
|
||||
must_approve_users: "Henkilökunnan täytyy hyväksyä kaikki uudet tilit, ennen uusien käyttäjien päästämistä sivustolle. VAROITUS: tämän asetuksen valitseminen poistaa pääsyn kaikilta jo olemassa olevilta henkilökuntaan kuulumattomilta käyttäjiltä."
|
||||
pending_users_reminder_delay: "Ilmoita valvojille, jos uusi käyttäjä on odottanut hyväksyntää kauemmin kuin näin monta tuntia. Aseta -1, jos haluat kytkeä ilmoitukset pois päältä."
|
||||
|
|
|
@ -931,7 +931,6 @@ fr:
|
|||
notify_mods_when_user_blocked: "Si un utilisateur est bloqué automatiquement, envoyer un message à tous les modérateurs."
|
||||
flag_sockpuppets: "Si un nouvel utilisateur répond à un sujet avec la même adresse IP que le nouvel utilisateur qui a commencé le sujet, alors leurs messages seront automatiquement marqués comme spam."
|
||||
traditional_markdown_linebreaks: "Utiliser le retour à la ligne traditionnel dans Markdown, qui nécessite deux espaces pour un saut de ligne."
|
||||
allow_html_tables: "Autoriser la saisie des tableaux dans le Markdown en utilisant les tags HTML : TABLE, THEAD, TD, TR, TH sont autorisés (nécessite un rebake de tous les anciens messages contenant des tableaux)"
|
||||
post_undo_action_window_mins: "Nombre de minutes pendant lesquelles un utilisateur peut annuler une action sur un message (J'aime, signaler, etc.)"
|
||||
must_approve_users: "Les responsables doivent approuver les nouveaux utilisateurs afin qu'ils puissent accéder au site. ATTENTION : activer cette option sur un site en production suspendra l'accès des utilisateurs existants qui ne sont pas des responsables !"
|
||||
pending_users_reminder_delay: "Avertir les modérateurs si des nouveaux utilisateurs sont en attente d'approbation depuis x heures. Mettre -1 pour désactiver les notifications."
|
||||
|
|
|
@ -921,7 +921,6 @@ he:
|
|||
notify_mods_when_user_blocked: "אם משתמש נחסם אוטומטית, שילחו הודעה לכל המנחים."
|
||||
flag_sockpuppets: "אם משתמשים חדשים מגיבים לנושא מכתובת IP זהה לזו של מי שהחל את הנושא, סמנו את הפוסטים של שניהם כספאם פוטנציאלי."
|
||||
traditional_markdown_linebreaks: "שימוש בשבירת שורות מסורתית בסימון, מה שדורש שני רווחים עוקבים למעבר שורה."
|
||||
allow_html_tables: "אפשרו הכנסת טבלאות ב Markdown באמצעות תגיות HTML. התגיות TABLE, THEAD, TD, TR, TH יהיו ברשימה לבנה (מצריך אפייה מחדש של כל הפוסטים הישנים שכוללים טבלאות)"
|
||||
post_undo_action_window_mins: "מספר הדקות בהן מתאפשר למשתמשים לבטל פעולות אחרות בפוסט (לייק, סימון, וכו')."
|
||||
must_approve_users: "על הצוות לאשר את כל המשתמשים החדשים לפני שהם מקבלים גישה לאתר. אזהרה: בחירה זו עבור אתר קיים תשלול גישה ממשתמשים קיימים שאינם מנהלים."
|
||||
pending_users_reminder_delay: "הודיעו למנחים אם משתמשים חדשים ממתינים לאישור למעלה מכמות זו של שעות. קבעו ל -1 כדי לנטרל התראות."
|
||||
|
|
|
@ -934,7 +934,6 @@ it:
|
|||
notify_mods_when_user_blocked: "Se un utente è bloccato automaticamente, manda un messaggio ai moderatori."
|
||||
flag_sockpuppets: "Se un nuovo utente risponde ad un argomento dallo stesso indirizzo IP dell'utente che ha aperto l'argomento stesso, segnala entrambi i messaggi come potenziale spam."
|
||||
traditional_markdown_linebreaks: "Usa l'accapo tradizionale in Markdown, cioè due spazi a fine riga per andare a capo."
|
||||
allow_html_tables: "Consenti di inserire tabelle in Markdown usando tag HTML. I tag TABLE, THEAD, TD, TR, TH saranno consentiti (richiede un full rebake di tutti i vecchi messaggi contenenti tabelle)"
|
||||
post_undo_action_window_mins: "Numero di minuti durante i quali gli utenti possono annullare le loro azioni recenti su un messaggio (segnalazioni, Mi piace, ecc.)."
|
||||
must_approve_users: "Lo staff deve approvare tutti i nuovi account utente prima che essi possano accedere al sito. ATTENZIONE: abilitare l'opzione per un sito live revocherà l'accesso per tutti gli utenti non-staff esistenti!"
|
||||
pending_users_reminder_delay: "Notifica i moderatori se nuovi utenti sono in attesa di approvazione per più di queste ore. Imposta a -1 per disabilitare le notifiche."
|
||||
|
|
|
@ -876,9 +876,7 @@ ko:
|
|||
notify_mods_when_user_blocked: "만약 사용자가 자동 블락되면 중간 운영자에게 메시지 보내기"
|
||||
flag_sockpuppets: "어떤 신규 사용자(예:24이내 가입자)가 글타래를 생성하고 같은 IP주소의 또 다른 신규 사용자가 댓글을 쓰면 자동 스팸 신고"
|
||||
traditional_markdown_linebreaks: "Markdown에서 전통적인 linebreak를 사용, linebreak시 두개의 trailing space를 사용하는 것."
|
||||
enable_experimental_markdown_it: "(실험) CommonMark를 지원하는 markdown.it 엔진을 사용합니다. 경고: 올바르게 작동하지 않는 플러그인이 있을 수 있습니다."
|
||||
enable_markdown_typographer: "문단의 가독성을 높이기 위해서 기본 타이포그라피 룰을 사용합니다. (c) (tm), 기타 기호를 교체하고 연달아 나오는 물음표의 갯수를 줄입니다."
|
||||
allow_html_tables: "마크다운 문서에 HTML 테이블을 허용합니다. TABLE, THEAD, TD, TR, TH 태그를 사용할 수 있습니다.(테이블이 포함된 이전 게시물에 적용하려면 rebake 해야 합니다.)"
|
||||
post_undo_action_window_mins: "사용자가 어떤 글에 대해서 수행한 작업(신고 등)을 취소하는 것이 허용되는 시간(초)"
|
||||
must_approve_users: "스태프는 반드시 사이트 엑세스권한을 허용하기 전에 모든 신규가입계정을 승인해야 합니다. 경고: 이것을 활성화하면 기존 스태프 아닌 회원들의 엑세스권한이 회수됩니다."
|
||||
pending_users_reminder_delay: "새로운 사용자가 승인을 기다리는 시간이 여기에 지정된 시간횟수보다 더 길어길경우 운영자에게 알려줍니다. 알림을 해제하려면 -1로 설정하세요."
|
||||
|
|
|
@ -895,7 +895,6 @@ nl:
|
|||
notify_mods_when_user_blocked: "Als een gebruiker automatisch geblokkeerd is, stuur dan een bericht naar alle moderatoren."
|
||||
flag_sockpuppets: "Als een nieuwe gebruiker antwoord op een topic vanaf hetzelfde ip-adres als de nieuwe gebruiker die het topic opende, markeer dan beide berichten als potentiële spam."
|
||||
traditional_markdown_linebreaks: "Gebruik traditionele regeleinden in Markdown, welke 2 spaties aan het einde van een regel nodig heeft voor een regeleinde."
|
||||
allow_html_tables: "Sta toe dat tabellen in Markdown mogen worden ingevoerd met behulp van HTML-tags. TABLE, TD, TR, TH zullen aan de whitelist worden toegevoegd (vereist volledig herbouwen van alle oude berichten met tabellen)"
|
||||
post_undo_action_window_mins: "Het aantal minuten waarin gebruikers hun recente acties op een bericht nog terug kunnen draaien (liken, markeren, etc)."
|
||||
must_approve_users: "Stafleden moeten alle nieuwe gebruikersaccounts goedkeuren voordat ze de site mogen bezoeken. OPGELET: als dit wordt aangezet voor een actieve site wordt alle toegang voor bestaande niet stafleden ingetrokken."
|
||||
pending_users_reminder_delay: "Moderators informeren als nieuwe gebruikers al langer dan dit aantal uren op goedkeuring wachten. Stel dit in op -1 om meldingen uit te schakelen."
|
||||
|
|
|
@ -956,7 +956,6 @@ pl_PL:
|
|||
notify_mods_when_user_blocked: "If a user is automatically blocked, send a message to all moderators."
|
||||
flag_sockpuppets: "Jeśli nowy użytkownik odpowiada na dany temat z tego samego adresu IP co nowy użytkownik, który założył temat, oznacz ich posty jako potencjalny spam."
|
||||
traditional_markdown_linebreaks: "Używaj tradycyjnych znaków końca linii w Markdown, to znaczy dwóch spacji na końcu linii."
|
||||
allow_html_tables: "Pozwalaj tabelom być zamieszczanym w Markdown przy użyciu tagów HTML. TABLE, THEAD, TD, TR, TH będą dozwolone (wymaga pełnego rebake na wszystkich starych postach zawierających tabele)."
|
||||
post_undo_action_window_mins: "Przez tyle minut użytkownicy mogą cofnąć swoje ostatnie działania przy danym poście (lajki, flagowanie, itd.)."
|
||||
must_approve_users: "Zespół musi zaakceptować wszystkie nowe konta zanim uzyskają dostęp do serwisu. UWAGA: włączenie tego dla już udostępnionej strony sprawi, że zostanie odebrany dostęp wszystkim istniejącym użytkownikom spoza zespołu."
|
||||
pending_users_reminder_delay: "Powiadomić moderatorów jeżeli nowi użytkownicy czekali na zatwierdzenie dłużej niż his mamy godzin. Ustaw -1 aby wyłączyć powiadomienia. "
|
||||
|
|
|
@ -802,7 +802,6 @@ pt:
|
|||
notify_mods_when_user_blocked: "Se um utilizador for bloqueado de forma automática, enviar uma mensagem a todos os moderadores."
|
||||
flag_sockpuppets: "Se um novo utilizador responde a um tópico a partir do mesmo endereço IP do novo utilizador que iniciou o tópico, sinalizar ambas as mensagens como potencial spam."
|
||||
traditional_markdown_linebreaks: "Utilize tradicionais quebras de linha no Markdown, que requer dois espaços no final para uma quebra de linha."
|
||||
allow_html_tables: "Permitir inserção de tabelas em Markdown utilizando tags HTML. TABLE,THEAD, TD, TR,TH fazem parte da lista branca (requer que todas as mensagens antigas que contém tabelas sejam refeitas)"
|
||||
post_undo_action_window_mins: "Número de minutos durante o qual os utilizadores têm permissão para desfazer ações numa mensagem (gostos, sinalizações, etc)."
|
||||
must_approve_users: "O pessoal deve aprovar todas as novas contas de utilizador antes destas terem permissão para aceder ao sítio. AVISO: ativar isto para um sítio ativo irá revogar o acesso aos utilizadores existentes que não fazem parte do pessoal!"
|
||||
pending_users_reminder_delay: "Notificar moderadores se novos utilizadores estiverem à espera de aprovação por mais que esta quantidade de horas. Configurar com -1 para desativar notificações."
|
||||
|
|
|
@ -878,7 +878,6 @@ ro:
|
|||
notify_mods_when_user_blocked: "Dacă un utilizator este blocat automat, trimite un mesaj tuturor moderatorilor."
|
||||
flag_sockpuppets: "Dacă un utilizator nou răspunde unui subiect de la același IP ca utilizatorul ce a pornit subiectul, marchează ambele postări ca potențial spam."
|
||||
traditional_markdown_linebreaks: "Folosește întreruperi de rând tradiționale în Markdown, ceea ce necesită două spații pentru un capăt de rând. "
|
||||
allow_html_tables: "Permite introducerea de tabele în Markdown prin folosirea de etichete HTML. HEAD, TD, TR, TH vor fi autorizate (necesită un rebake pe toate postările vechi ce conțin tabele)"
|
||||
post_undo_action_window_mins: "Numărul de minute în care utilizatorii pot anula acțiunile recente asupra unei postări (aprecieri, marcări cu marcaje de avertizare, etc)."
|
||||
must_approve_users: "Membrii echipei trebuie să aprobe toate conturile noilor utilizatori înainte ca aceștia să poată accesa site-ul. ATENȚIE: activarea acestei opțiuni pentru un site în producție va revoca accesul tuturor utilizatorilor care nu sunt membri ai echipei!"
|
||||
pending_users_reminder_delay: "Notifică moderatorii dacă noii utilizatori sunt în așteptarea aprobării de mai mult de atâtea ore. Setează la -1 pentru a dezactiva notificările."
|
||||
|
|
|
@ -761,7 +761,6 @@ sk:
|
|||
notify_mods_when_user_blocked: "Ak je používateľ automaticky zablokovaný, pošli správu všetkým moderátorom."
|
||||
flag_sockpuppets: "Ak nový používateľ odpovedá na tému z rovnakej IP adresy, ako nový používateľ, ktorý danú tému vytvoril, označ oba ich príspevky ako potencionálny spam."
|
||||
traditional_markdown_linebreaks: "V Markdown použiť tradičné oddeľovače riadkov, čo vyžaduje dve koncové medzery ako oddeľovač riadku."
|
||||
allow_html_tables: "V Markdown umožniť použitie tabuliek pomocou HTML značiek. TABLE, THEAD, TD, TR, TH budú umožnené (vyžaduje \"full rebake\" na všetkých starých príspevkoch ktoré obsahujú tabuľky)"
|
||||
post_undo_action_window_mins: "Počet minút počas ktorých môžu používatelia zrušiť poslednú akciu na príspevku (\"Páči sa\", označenie, atď..)."
|
||||
must_approve_users: "Obsluha musí povoliť účty všetkým novým používateľom skôr než im bude povolený prístup na stránku. UPOZORNENIE: zapnutie na živej stránke spôsobí zrušenie prístupu pre existujúcich používateľov, okrem obsluhy!"
|
||||
pending_users_reminder_delay: "Upozorni moderátora ak nový používateľ čaká na schválenie dlhšie ako tento počet hodín. Nastavte -1 pre vypnutie upozornenia."
|
||||
|
|
|
@ -818,7 +818,6 @@ sv:
|
|||
notify_mods_when_user_blocked: "Om en användare blockeras automatiskt, skicka ett meddelande till alla moderatorer."
|
||||
flag_sockpuppets: "Flagga båda användarnas inlägg som potentiell skräppost om en ny användare svarar på ett ämne från samma IP-adress som den andra nya användaren som skapade ämnet."
|
||||
traditional_markdown_linebreaks: "Använd vanliga radmatningar i Markdown, vilka kräver 2 avslutande mellanslag för en radmatning."
|
||||
allow_html_tables: "Tillåt tabeller att läggas in i Markdown genom användning av HTML-taggar. TABLE, THEAD, TD, TR, TH kommer att vitlistas (kräver full uppdatering/rebake av alla gamla inlägg som innehåller tabeller)"
|
||||
post_undo_action_window_mins: "Antal minuter som en användare tillåts att upphäva handlingar på ett inlägg som gjorts nyligen (gillning, flaggning osv)."
|
||||
must_approve_users: "Personal måste godkänna alla nya användarkonton innan de tillåts använda webbplatsen. VARNING: om det tillåts när webbplatsen är live så kommer det att upphäva tillgång för alla existerande användare som inte är personal!"
|
||||
pending_users_reminder_delay: "Notifiera moderatorer om nya användare har väntat på godkännande längre än så här många timmar. Ange -1 för att inaktivera notifikationer. "
|
||||
|
|
|
@ -705,7 +705,6 @@ tr_TR:
|
|||
notify_mods_when_user_blocked: "Eğer bir kullanıcı otomatik olarak engellendiyse, tüm moderatörlere ileti yolla."
|
||||
flag_sockpuppets: "Eğer, yeni kullanıcı konuya, konuyu başlatan yeni kullanıcı ile aynı IP adresinden cevap yazarsa, her iki gönderiyi de potansiyel istenmeyen olarak bildir. "
|
||||
traditional_markdown_linebreaks: "Markdown'da, satır sonundan önce yazının sağında iki tane boşluk gerektiren, geleneksel satır sonu metodunu kullan."
|
||||
allow_html_tables: "Tabloların HTML etiketleri kullanılarak Markdown ile oluşturulmasına izin verin. TABLE, THEAD, TD, TR, TH kabul edilir (tablo içeren tüm eski gönderilerin yenilenmesini gerektirir) "
|
||||
post_undo_action_window_mins: "Bir gönderide yapılan yeni eylemlerin (beğenme, bildirme vb) geri alınabileceği zaman, dakika olarak"
|
||||
must_approve_users: "Siteye erişimlerine izin verilmeden önce tüm yeni kullanıcı hesaplarının görevliler tarafından onaylanması gerekir. UYARI: yayındaki bir site için bunu etkinleştirmek görevli olmayan hesapların erişimini iptal edecek."
|
||||
pending_users_reminder_delay: "Belirtilen saatten daha uzun bir süredir onay bekleyen yeni kullanıcılar mevcutsa moderatörleri bilgilendir. Bilgilendirmeyi devre dışı bırakmak için -1 girin."
|
||||
|
|
|
@ -681,7 +681,6 @@ vi:
|
|||
notify_mods_when_user_blocked: "Nếu một thành viên được khóa tự động, gửi tin nhắn đến tất cả các điều hành viên."
|
||||
flag_sockpuppets: "Nếu thành viên mới trả lời chủ đề có cùng địa chỉ IP với thành viên mới tạo chủ đề, đánh dấu các bài viết của họ là spam tiềm năng."
|
||||
traditional_markdown_linebreaks: "Sử dụng ngắt dòng truyền thống trong Markdown, đòi hỏi hai khoảng trống kế tiếp cho một ngắt dòng."
|
||||
allow_html_tables: "Cho phép nhập bảng trong Markdown sử dụng các thẻ HTML. TABLE, THEAD, TD, TR, TH sẽ được sử dụng (đòi hỏi thực hiện lại cho các bài viết cũ có chứa bảng)"
|
||||
post_undo_action_window_mins: "Số phút thành viên được phép làm lại các hành động gần đây với bài viết (like, đánh dấu...)."
|
||||
must_approve_users: "Quản trị viên phải duyệt tất cả các tài khoản thành viên mới trước khi họ có quyền truy cập website. LƯU Ý: bật tính năng này trên site đang hoạt động sẽ hủy bỏ quyền truy cập đối với các tài khoản thành viên hiện tại!"
|
||||
pending_users_reminder_delay: "Thông báo cho quản trị viên nếu thành viên mới đã chờ duyệt lâu hơn số giờ được thiết lập ở đây, đặt là -1 để tắt thông báo."
|
||||
|
|
|
@ -881,7 +881,6 @@ zh_CN:
|
|||
notify_mods_when_user_blocked: "如果一个用户被自动封禁了,发送一个私信给所有管理员。"
|
||||
flag_sockpuppets: "如果一个新用户开始了一个主题,并且同时另一个新用户以同一个 IP 在该主题回复,他们所有的帖子都将被自动标记为垃圾。"
|
||||
traditional_markdown_linebreaks: "在 Markdown 中使用传统换行符,即用两个尾随空格来换行"
|
||||
allow_html_tables: "允许在输入 Markdown 文本时使用表格 HTML 标签。标签 TABLE、THEAD、TD、TR、TH 将被允许使用,即白名单这些标签(需要重置所有包含表格的老帖子的 HTML)"
|
||||
post_undo_action_window_mins: "允许用户在帖子上进行撤销操作(赞、标记等)所需等待的间隔分钟数"
|
||||
must_approve_users: "新用户在被允许访问站点前需要由管理人员批准。警告:在运行的站点中启用将解除所有非管理人员用户的访问权限!"
|
||||
pending_users_reminder_delay: "如果新用户等待批准时间超过此小时设置则通知版主。设置 -1 关闭通知。"
|
||||
|
|
|
@ -832,7 +832,6 @@ zh_TW:
|
|||
notify_mods_when_user_blocked: "若有用戶被自動封鎖,將發送訊息給所有板主。"
|
||||
flag_sockpuppets: "如果一個新用戶開始了一個主題,並且同時另一個新用戶以同一個 IP 在該主題回復,他們所有的帖子都將被自動標記為垃圾。"
|
||||
traditional_markdown_linebreaks: "在 Markdown 中使用傳統的換行符號,即用兩個行末空格來換行"
|
||||
allow_html_tables: "允許在輸入 Markdown 文本時使用表格 HTML 標籤。標籤 TABLE、THEAD、TD、TR、TH 將被允許使用,即白名單這些標籤(需要重置所有包含表格的老帖子的 HTML)"
|
||||
post_undo_action_window_mins: "允許用戶在帖子上進行撤銷操作(讚、標記等)所需等待的時間分隔(分鐘)"
|
||||
must_approve_users: "新用戶在被允許訪問站點前需要由管理人員批准。警告:在運行的站點中啟用將解除所有非管理人員用戶的訪問權限!"
|
||||
pending_users_reminder_delay: "如果新用戶等待批准時間超過此小時設置則通知版主。設置 -1 關閉通知。"
|
||||
|
|
|
@ -495,19 +495,12 @@ posting:
|
|||
delete_removed_posts_after:
|
||||
client: true
|
||||
default: 24
|
||||
enable_experimental_markdown_it:
|
||||
client: true
|
||||
default: false
|
||||
shadowed_by_global: true
|
||||
traditional_markdown_linebreaks:
|
||||
client: true
|
||||
default: false
|
||||
enable_markdown_typographer:
|
||||
client: true
|
||||
default: false
|
||||
allow_html_tables:
|
||||
client: true
|
||||
default: false
|
||||
default: true
|
||||
suppress_reply_directly_below:
|
||||
client: true
|
||||
default: true
|
||||
|
|
|
@ -80,11 +80,7 @@ module PrettyText
|
|||
ctx_load(ctx, "#{Rails.root}/app/assets/javascripts/discourse-loader.js")
|
||||
ctx_load(ctx, "vendor/assets/javascripts/lodash.js")
|
||||
ctx_load_manifest(ctx, "pretty-text-bundle.js")
|
||||
|
||||
if SiteSetting.enable_experimental_markdown_it
|
||||
ctx_load_manifest(ctx, "markdown-it-bundle.js")
|
||||
end
|
||||
|
||||
ctx_load_manifest(ctx, "markdown-it-bundle.js")
|
||||
root_path = "#{Rails.root}/app/assets/javascripts/"
|
||||
|
||||
apply_es6_file(ctx, root_path, "discourse/lib/utilities")
|
||||
|
@ -152,13 +148,6 @@ module PrettyText
|
|||
paths[:S3BaseUrl] = Discourse.store.absolute_base_url
|
||||
end
|
||||
|
||||
if SiteSetting.enable_experimental_markdown_it
|
||||
# defer load markdown it
|
||||
unless context.eval("window.markdownit")
|
||||
ctx_load_manifest(context, "markdown-it-bundle.js")
|
||||
end
|
||||
end
|
||||
|
||||
custom_emoji = {}
|
||||
Emoji.custom.map { |e| custom_emoji[e.name] = e.url }
|
||||
|
||||
|
@ -186,12 +175,14 @@ module PrettyText
|
|||
|
||||
buffer << "__textOptions = __buildOptions(__optInput);\n"
|
||||
|
||||
# Be careful disabling sanitization. We allow for custom emails
|
||||
if opts[:sanitize] == false
|
||||
buffer << ('__textOptions.sanitize = false;')
|
||||
end
|
||||
|
||||
buffer << ("__pt = new __PrettyText(__textOptions);")
|
||||
|
||||
# Be careful disabling sanitization. We allow for custom emails
|
||||
if opts[:sanitize] == false
|
||||
buffer << ('__pt.disableSanitizer();')
|
||||
end
|
||||
|
||||
opts = context.eval(buffer)
|
||||
|
||||
DiscourseEvent.trigger(:markdown_context, context)
|
||||
|
|
|
@ -7,8 +7,6 @@ __utils = require('discourse/lib/utilities');
|
|||
__emojiUnicodeReplacer = null;
|
||||
|
||||
__setUnicode = function(replacements) {
|
||||
require('pretty-text/engines/discourse-markdown/emoji').setUnicodeReplacements(replacements);
|
||||
|
||||
let unicodeRegexp = new RegExp(Object.keys(replacements).sort().reverse().join("|"), "g");
|
||||
|
||||
__emojiUnicodeReplacer = function(text) {
|
||||
|
|
|
@ -1,25 +1,3 @@
|
|||
import { registerOption } from 'pretty-text/pretty-text';
|
||||
|
||||
function insertDetails(_, summary, details) {
|
||||
return `<details><summary>${summary}</summary>${details}</details>`;
|
||||
}
|
||||
|
||||
// replace all [details] BBCode with HTML 5.1 equivalent
|
||||
function replaceDetails(text) {
|
||||
text = text || "";
|
||||
|
||||
while (text !== (text = text.replace(/\[details=([^\]]+)\]((?:(?!\[details=[^\]]+\]|\[\/details\])[\S\s])*)\[\/details\]/ig, insertDetails)));
|
||||
|
||||
// add new lines to make sure we *always* have a <p> element after </summary> and around </details>
|
||||
// otherwise we can't hide the content since we can't target text nodes via CSS
|
||||
return text.replace(/<\/summary>/ig, "</summary>\n\n")
|
||||
.replace(/<\/details>/ig, "\n\n</details>\n\n");
|
||||
}
|
||||
|
||||
registerOption((siteSettings, opts) => {
|
||||
opts.features.details = true;
|
||||
});
|
||||
|
||||
const rule = {
|
||||
tag: 'details',
|
||||
before: function(state, attrs) {
|
||||
|
@ -46,11 +24,7 @@ export function setup(helper) {
|
|||
'details.elided'
|
||||
]);
|
||||
|
||||
if (helper.markdownIt) {
|
||||
helper.registerPlugin(md => {
|
||||
md.block.bbcode_ruler.push('details', rule);
|
||||
});
|
||||
} else {
|
||||
helper.addPreProcessor(text => replaceDetails(text));
|
||||
}
|
||||
helper.registerPlugin(md => {
|
||||
md.block.bbcode_ruler.push('details', rule);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ const defaultOpts = buildOptions({
|
|||
emoji_set: 'emoji_one',
|
||||
highlighted_languages: 'json|ruby|javascript',
|
||||
default_code_lang: 'auto',
|
||||
censored_words: 'shucks|whiz|whizzer'
|
||||
censored_words: ''
|
||||
},
|
||||
getURL: url => url
|
||||
});
|
||||
|
@ -19,17 +19,13 @@ test("details", assert => {
|
|||
assert.equal(new PrettyText(defaultOpts).cook(input), expected.replace(/\/>/g, ">"), text);
|
||||
};
|
||||
cooked(`<details><summary>Info</summary>coucou</details>`,
|
||||
`<details><summary>Info</summary>\n\n<p>coucou</p>\n\n</details>`,
|
||||
`<details><summary>Info</summary>coucou</details>`,
|
||||
"manual HTML for details");
|
||||
cooked(` <details><summary>Info</summary>coucou</details>`,
|
||||
`<details><summary>Info</summary>\n\n<p>coucou</p>\n\n</details>`,
|
||||
"manual HTML for details with a space");
|
||||
|
||||
cooked(`<details open="open"><summary>Info</summary>coucou</details>`,
|
||||
`<details open="open"><summary>Info</summary>\n\n<p>coucou</p>\n\n</details>`,
|
||||
"open attribute");
|
||||
|
||||
cooked(`<details open><summary>Info</summary>coucou</details>`,
|
||||
`<details open><summary>Info</summary>\n\n<p>coucou</p>\n\n</details>`,
|
||||
"open attribute");
|
||||
cooked("[details=testing]\ntest\n[/details]",
|
||||
`<details>
|
||||
<summary>
|
||||
testing</summary>
|
||||
<p>test</p>
|
||||
</details>`);
|
||||
});
|
||||
|
|
1
script/.gitignore
vendored
Normal file
1
script/.gitignore
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
tmp/*
|
|
@ -53,7 +53,6 @@ class ImportScripts::Lithium < ImportScripts::Base
|
|||
|
||||
def execute
|
||||
|
||||
SiteSetting.allow_html_tables = true
|
||||
@max_start_id = Post.maximum(:id)
|
||||
|
||||
import_categories
|
||||
|
|
24
script/test_pretty_text.rb
Normal file
24
script/test_pretty_text.rb
Normal file
|
@ -0,0 +1,24 @@
|
|||
require File.expand_path("../../config/environment", __FILE__)
|
||||
|
||||
|
||||
puts PrettyText.cook "test"
|
||||
1000.times do |i|
|
||||
# PrettyText.v8.eval <<~JS
|
||||
# window.markdownit().render('test');
|
||||
# JS
|
||||
PrettyText.cook "test"
|
||||
|
||||
PrettyText.v8.eval('gc()')
|
||||
|
||||
# if i % 500 == 0
|
||||
#p PrettyText.v8.heap_stats
|
||||
# end
|
||||
end
|
||||
|
||||
# sam@ubuntu script % ruby test_pretty_text.rb
|
||||
# {:total_physical_size=>10556240, :total_heap_size_executable=>5242880, :total_heap_size=>16732160, :used_heap_size=>7483336, :heap_size_limit=>1501560832}
|
||||
# {:total_physical_size=>288670880, :total_heap_size_executable=>6291456, :total_heap_size=>292507648, :used_heap_size=>252365360, :heap_size_limit=>1501560832}
|
||||
# {:total_physical_size=>543060056, :total_heap_size_executable=>6291456, :total_heap_size=>548360192, :used_heap_size=>503699768, :heap_size_limit=>1501560832}
|
||||
# {:total_physical_size=>793401560, :total_heap_size_executable=>6291456, :total_heap_size=>801067008, :used_heap_size=>739517840, :heap_size_limit=>1501560832}
|
||||
# {:total_physical_size=>1045932696, :total_heap_size_executable=>6291456, :total_heap_size=>1055870976, :used_heap_size=>992549688, :heap_size_limit=>1501560832}
|
||||
# {:total_physical_size=>1298442008, :total_heap_size_executable=>6291456, :total_heap_size=>1309626368, :used_heap_size=>1224681072, :heap_size_limit=>1501560832}
|
|
@ -164,7 +164,7 @@ describe CookedPostProcessor do
|
|||
|
||||
it "generates overlay information" do
|
||||
cpp.post_process_images
|
||||
expect(cpp.html).to match_html "<p><div class=\"lightbox-wrapper\"><a data-download-href=\"/uploads/default/#{upload.sha1}\" href=\"/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"logo.png\"><img src=\"/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
expect(cpp.html).to match_html "<p>\n<div class=\"lightbox-wrapper\"><a data-download-href=\"/uploads/default/#{upload.sha1}\" href=\"/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"logo.png\"><img src=\"/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
<span class=\"filename\">logo.png</span><span class=\"informations\">1750x2000 1.21 KB</span><span class=\"expand\"></span>
|
||||
</div></a></div></p>"
|
||||
expect(cpp).to be_dirty
|
||||
|
@ -197,7 +197,7 @@ describe CookedPostProcessor do
|
|||
|
||||
it "generates overlay information" do
|
||||
cpp.post_process_images
|
||||
expect(cpp.html).to match_html "<p><div class=\"lightbox-wrapper\"><a data-download-href=\"/subfolder/uploads/default/#{upload.sha1}\" href=\"/subfolder/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"logo.png\"><img src=\"/subfolder/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
expect(cpp.html).to match_html "<p>\n<div class=\"lightbox-wrapper\"><a data-download-href=\"/subfolder/uploads/default/#{upload.sha1}\" href=\"/subfolder/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"logo.png\"><img src=\"/subfolder/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
<span class=\"filename\">logo.png</span><span class=\"informations\">1750x2000 1.21 KB</span><span class=\"expand\"></span>
|
||||
</div></a></div></p>"
|
||||
expect(cpp).to be_dirty
|
||||
|
@ -206,7 +206,7 @@ describe CookedPostProcessor do
|
|||
it "should escape the filename" do
|
||||
upload.update_attributes!(original_filename: "><img src=x onerror=alert('haha')>.png")
|
||||
cpp.post_process_images
|
||||
expect(cpp.html).to match_html "<p><div class=\"lightbox-wrapper\"><a data-download-href=\"/subfolder/uploads/default/#{upload.sha1}\" href=\"/subfolder/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"&gt;&lt;img src=x onerror=alert(&#39;haha&#39;)&gt;.png\"><img src=\"/subfolder/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
expect(cpp.html).to match_html "<p>\n<div class=\"lightbox-wrapper\"><a data-download-href=\"/subfolder/uploads/default/#{upload.sha1}\" href=\"/subfolder/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"&gt;&lt;img src=x onerror=alert(&#39;haha&#39;)&gt;.png\"><img src=\"/subfolder/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
<span class=\"filename\">&gt;&lt;img src=x onerror=alert(&#39;haha&#39;)&gt;.png</span><span class=\"informations\">1750x2000 1.21 KB</span><span class=\"expand\"></span>
|
||||
</div></a></div></p>"
|
||||
end
|
||||
|
@ -233,7 +233,7 @@ describe CookedPostProcessor do
|
|||
|
||||
it "generates overlay information" do
|
||||
cpp.post_process_images
|
||||
expect(cpp.html).to match_html "<p><div class=\"lightbox-wrapper\"><a data-download-href=\"/uploads/default/#{upload.sha1}\" href=\"/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"WAT\"><img src=\"/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" title=\"WAT\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
expect(cpp.html).to match_html "<p>\n<div class=\"lightbox-wrapper\"><a data-download-href=\"/uploads/default/#{upload.sha1}\" href=\"/uploads/default/1/1234567890123456.jpg\" class=\"lightbox\" title=\"WAT\"><img src=\"/uploads/default/optimized/1X/#{upload.sha1}_1_690x788.png\" title=\"WAT\" width=\"690\" height=\"788\"><div class=\"meta\">
|
||||
<span class=\"filename\">WAT</span><span class=\"informations\">1750x2000 1.21 KB</span><span class=\"expand\"></span>
|
||||
</div></a></div></p>"
|
||||
expect(cpp).to be_dirty
|
||||
|
@ -652,7 +652,7 @@ describe CookedPostProcessor do
|
|||
let(:cpp) { CookedPostProcessor.new(post) }
|
||||
|
||||
context "emoji inside a quote" do
|
||||
let(:post) { Fabricate(:post, raw: "time to eat some sweet [quote]:candy:[/quote] mmmm") }
|
||||
let(:post) { Fabricate(:post, raw: "time to eat some sweet \n[quote]\n:candy:\n[/quote]\n mmmm") }
|
||||
|
||||
it "doesn't award a badge when the emoji is in a quote" do
|
||||
cpp.grant_badges
|
||||
|
|
|
@ -5,7 +5,7 @@ require 'html_normalize'
|
|||
describe PrettyText do
|
||||
|
||||
before do
|
||||
SiteSetting.enable_experimental_markdown_it = true
|
||||
SiteSetting.enable_markdown_typographer = false
|
||||
end
|
||||
|
||||
def n(html)
|
||||
|
@ -61,7 +61,7 @@ describe PrettyText do
|
|||
[/quote]
|
||||
MD
|
||||
html = <<~HTML
|
||||
<aside class="quote" data-post="123" data-topic="456">
|
||||
<aside class="quote" data-post="123" data-topic="456" data-full="true">
|
||||
<div class="title">
|
||||
<div class="quote-controls"></div>
|
||||
<img alt width="20" height="20" src="//test.localhost/uploads/default/avatars/42d/57c/46ce7ee487/40.png" class="avatar"> #{user.username}:</div>
|
||||
|
@ -786,6 +786,7 @@ HTML
|
|||
expect(PrettyText.cook("<http://a.com>")).not_to include('onebox')
|
||||
expect(PrettyText.cook(" http://a.com")).not_to include('onebox')
|
||||
expect(PrettyText.cook("a\n http://a.com")).not_to include('onebox')
|
||||
expect(PrettyText.cook("sam@sam.com")).not_to include('onebox')
|
||||
end
|
||||
|
||||
it "can handle bbcode" do
|
||||
|
@ -857,7 +858,13 @@ HTML
|
|||
|
||||
it "supports url bbcode" do
|
||||
cooked = PrettyText.cook "[url]http://sam.com[/url]"
|
||||
html = '<p><a href="http://sam.com" data-bbcode="true" rel="nofollow noopener">http://sam.com</a></p>'
|
||||
html = '<p><a href="http://sam.com" data-bbcode="true" rel="nofollow noopener">http://sam.com</a></p>';
|
||||
expect(cooked).to eq(html)
|
||||
end
|
||||
|
||||
it "supports nesting tags in url" do
|
||||
cooked = PrettyText.cook("[url=http://sam.com][b]I am sam[/b][/url]")
|
||||
html = '<p><a href="http://sam.com" data-bbcode="true" rel="nofollow noopener"><span class="bbcode-b">I am sam</span></a></p>';
|
||||
expect(cooked).to eq(html)
|
||||
end
|
||||
|
||||
|
@ -875,21 +882,36 @@ HTML
|
|||
|
||||
it "support special handling for space in urls" do
|
||||
cooked = PrettyText.cook "http://testing.com?a%20b"
|
||||
html = '<p><a href="http://testing.com?a%20b" class="onebox" rel="nofollow noopener">http://testing.com?a%20b</a></p>'
|
||||
html = '<p><a href="http://testing.com?a%20b" class="onebox" target="_blank" rel="nofollow noopener">http://testing.com?a%20b</a></p>'
|
||||
expect(cooked).to eq(html)
|
||||
end
|
||||
|
||||
it "supports onebox for decoded urls" do
|
||||
cooked = PrettyText.cook "http://testing.com?a%50b"
|
||||
html = '<p><a href="http://testing.com?a%50b" class="onebox" rel="nofollow noopener">http://testing.com?aPb</a></p>'
|
||||
html = '<p><a href="http://testing.com?a%50b" class="onebox" target="_blank" rel="nofollow noopener">http://testing.com?aPb</a></p>'
|
||||
expect(cooked).to eq(html)
|
||||
end
|
||||
|
||||
it "should sanitize the html" do
|
||||
expect(PrettyText.cook("<test>alert(42)</test>")).to eq "<p>alert(42)</p>"
|
||||
end
|
||||
|
||||
it "should not onebox magically linked urls" do
|
||||
expect(PrettyText.cook('[url]site.com[/url]')).not_to include('onebox')
|
||||
end
|
||||
|
||||
it "should sanitize the html" do
|
||||
expect(PrettyText.cook("<p class='hi'>hi</p>")).to eq "<p>hi</p>"
|
||||
end
|
||||
|
||||
it "should strip SCRIPT" do
|
||||
expect(PrettyText.cook("<script>alert(42)</script>")).to eq ""
|
||||
end
|
||||
|
||||
it "should allow sanitize bypass" do
|
||||
expect(PrettyText.cook("<test>alert(42)</test>", sanitize: false)).to eq "<p><test>alert(42)</test></p>"
|
||||
end
|
||||
|
||||
# custom rule used to specify image dimensions via alt tags
|
||||
describe "image dimensions" do
|
||||
it "allows title plus dimensions" do
|
||||
|
|
|
@ -50,7 +50,7 @@ describe Jobs::ProcessPost do
|
|||
|
||||
it "extracts links to quoted posts" do
|
||||
quoted_post = Fabricate(:post, raw: "This is a post with a link to https://www.discourse.org", post_number: 42)
|
||||
post.update_columns(raw: "This quote is the best\n\n[quote=\"#{quoted_post.user.username}, topic:#{quoted_post.topic_id}, post:#{quoted_post.post_number}\"]#{quoted_post.excerpt}[/quote]")
|
||||
post.update_columns(raw: "This quote is the best\n\n[quote=\"#{quoted_post.user.username}, topic:#{quoted_post.topic_id}, post:#{quoted_post.post_number}\"]\n#{quoted_post.excerpt}\n[/quote]")
|
||||
# when creating a quote, we also create the reflexion link
|
||||
expect { Jobs::ProcessPost.new.execute(post_id: post.id) }.to change { TopicLink.count }.by(2)
|
||||
end
|
||||
|
|
|
@ -85,11 +85,6 @@ describe PostAnalyzer do
|
|||
post_analyzer = PostAnalyzer.new(raw_three_links, default_topic_id)
|
||||
expect(post_analyzer.linked_hosts).to eq({"discourse.org" => 1, "www.imdb.com" => 1})
|
||||
end
|
||||
|
||||
it 'returns blank for ipv6 output' do
|
||||
post_analyzer = PostAnalyzer.new('PING www.google.com(lb-in-x93.1e100.net) 56 data bytes', default_topic_id)
|
||||
expect(post_analyzer.linked_hosts).to be_blank
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
|
@ -181,12 +176,14 @@ describe PostAnalyzer do
|
|||
end
|
||||
|
||||
it "ignores pre" do
|
||||
post_analyzer = PostAnalyzer.new("<pre>@Jake</pre> @Finn", default_topic_id)
|
||||
# note, CommonMark has rules for dealing with HTML, if your paragraph starts with it
|
||||
# it will no longer be an "inline" so this means that @Finn in this case would not be a mention
|
||||
post_analyzer = PostAnalyzer.new(". <pre>@Jake</pre> @Finn", default_topic_id)
|
||||
expect(post_analyzer.raw_mentions).to eq(['finn'])
|
||||
end
|
||||
|
||||
it "catches content between pre tags" do
|
||||
post_analyzer = PostAnalyzer.new("<pre>hello</pre> @Finn <pre></pre>", default_topic_id)
|
||||
post_analyzer = PostAnalyzer.new(". <pre>hello</pre> @Finn <pre></pre>", default_topic_id)
|
||||
expect(post_analyzer.raw_mentions).to eq(['finn'])
|
||||
end
|
||||
|
||||
|
@ -201,7 +198,7 @@ describe PostAnalyzer do
|
|||
end
|
||||
|
||||
it "ignores quotes" do
|
||||
post_analyzer = PostAnalyzer.new("[quote=\"Evil Trout\"]@Jake[/quote] @Finn", default_topic_id)
|
||||
post_analyzer = PostAnalyzer.new("[quote=\"Evil Trout\"]\n@Jake\n[/quote]\n @Finn", default_topic_id)
|
||||
expect(post_analyzer.raw_mentions).to eq(['finn'])
|
||||
end
|
||||
|
||||
|
|
|
@ -414,12 +414,14 @@ describe Post do
|
|||
end
|
||||
|
||||
it "ignores pre" do
|
||||
post = Fabricate.build(:post, post_args.merge(raw: "<pre>@Jake</pre> @Finn"))
|
||||
# we need to force an inline
|
||||
post = Fabricate.build(:post, post_args.merge(raw: "p <pre>@Jake</pre> @Finn"))
|
||||
expect(post.raw_mentions).to eq(['finn'])
|
||||
end
|
||||
|
||||
it "catches content between pre tags" do
|
||||
post = Fabricate.build(:post, post_args.merge(raw: "<pre>hello</pre> @Finn <pre></pre>"))
|
||||
# per common mark we need to force an inline
|
||||
post = Fabricate.build(:post, post_args.merge(raw: "a <pre>hello</pre> @Finn <pre></pre>"))
|
||||
expect(post.raw_mentions).to eq(['finn'])
|
||||
end
|
||||
|
||||
|
@ -429,7 +431,7 @@ describe Post do
|
|||
end
|
||||
|
||||
it "ignores quotes" do
|
||||
post = Fabricate.build(:post, post_args.merge(raw: "[quote=\"Evil Trout\"]@Jake[/quote] @Finn"))
|
||||
post = Fabricate.build(:post, post_args.merge(raw: "[quote=\"Evil Trout\"]\n@Jake\n[/quote]\n@Finn"))
|
||||
expect(post.raw_mentions).to eq(['finn'])
|
||||
end
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ import { IMAGE_VERSION as v} from 'pretty-text/emoji';
|
|||
|
||||
QUnit.module("lib:pretty-text");
|
||||
|
||||
const defaultOpts = buildOptions({
|
||||
const rawOpts = {
|
||||
siteSettings: {
|
||||
enable_emoji: true,
|
||||
emoji_set: 'emoji_one',
|
||||
|
@ -15,7 +15,9 @@ const defaultOpts = buildOptions({
|
|||
censored_pattern: '\\d{3}-\\d{4}|tech\\w*'
|
||||
},
|
||||
getURL: url => url
|
||||
});
|
||||
};
|
||||
|
||||
const defaultOpts = buildOptions(rawOpts);
|
||||
|
||||
QUnit.assert.cooked = function(input, expected, message) {
|
||||
const actual = new PrettyText(defaultOpts).cook(input);
|
||||
|
@ -28,7 +30,8 @@ QUnit.assert.cooked = function(input, expected, message) {
|
|||
};
|
||||
|
||||
QUnit.assert.cookedOptions = function(input, opts, expected, message) {
|
||||
const actual = new PrettyText(_.merge({}, defaultOpts, opts)).cook(input);
|
||||
const merged = _.merge({}, rawOpts, opts);
|
||||
const actual = new PrettyText(buildOptions(merged)).cook(input);
|
||||
this.pushResult({
|
||||
result: actual === expected,
|
||||
actual,
|
||||
|
@ -41,12 +44,18 @@ QUnit.assert.cookedPara = function(input, expected, message) {
|
|||
QUnit.assert.cooked(input, `<p>${expected}</p>`, message);
|
||||
};
|
||||
|
||||
QUnit.test("buildOptions", assert => {
|
||||
assert.ok(buildOptions({ siteSettings: { allow_html_tables: true } }).features.table, 'tables enabled');
|
||||
assert.ok(!buildOptions({ siteSettings: { allow_html_tables: false } }).features.table, 'tables disabled');
|
||||
|
||||
assert.ok(buildOptions({ siteSettings: { enable_emoji: true } }).features.emoji, 'emoji enabled');
|
||||
assert.ok(!buildOptions({ siteSettings: { enable_emoji: false } }).features.emoji, 'emoji disabled');
|
||||
QUnit.skip("Pending Engine fixes and spec fixes", assert => {
|
||||
assert.cooked("Derpy: http://derp.com?_test_=1",
|
||||
'<p>Derpy: <a href=https://derp.com?_test_=1"http://derp.com?_test_=1">http://derp.com?_test_=1</a></p>',
|
||||
"works with underscores in urls");
|
||||
|
||||
assert.cooked("**a*_b**", "<p><strong>a*_b</strong></p>", "allows for characters within bold");
|
||||
});
|
||||
|
||||
QUnit.test("buildOptions", assert => {
|
||||
assert.ok(buildOptions({ siteSettings: { enable_emoji: true } }).discourse.features.emoji, 'emoji enabled');
|
||||
assert.ok(!buildOptions({ siteSettings: { enable_emoji: false } }).discourse.features.emoji, 'emoji disabled');
|
||||
});
|
||||
|
||||
QUnit.test("basic cooking", assert => {
|
||||
|
@ -69,10 +78,8 @@ QUnit.test("Nested bold and italics", assert => {
|
|||
|
||||
QUnit.test("Traditional Line Breaks", assert => {
|
||||
const input = "1\n2\n3";
|
||||
assert.cooked(input, "<p>1<br/>2<br/>3</p>", "automatically handles trivial newlines");
|
||||
|
||||
const result = new PrettyText({ traditionalMarkdownLinebreaks: true }).cook(input);
|
||||
assert.equal(result, "<p>1\n2\n3</p>");
|
||||
assert.cooked(input, "<p>1<br>\n2<br>\n3</p>", "automatically handles trivial newlines");
|
||||
assert.cookedOptions(input, { siteSettings: {traditional_markdown_linebreaks: true} }, "<p>1\n2\n3</p>");
|
||||
});
|
||||
|
||||
QUnit.test("Unbalanced underscores", assert => {
|
||||
|
@ -81,15 +88,19 @@ QUnit.test("Unbalanced underscores", assert => {
|
|||
|
||||
QUnit.test("Line Breaks", assert => {
|
||||
assert.cooked("[] first choice\n[] second choice",
|
||||
"<p>[] first choice<br/>[] second choice</p>",
|
||||
"<p>[] first choice<br>\n[] second choice</p>",
|
||||
"it handles new lines correctly with [] options");
|
||||
|
||||
// note this is a change from previous engine but is correct
|
||||
// we have an html block and behavior is defined per common mark
|
||||
// spec
|
||||
// ole engine would wrap trout in a <p>
|
||||
assert.cooked("<blockquote>evil</blockquote>\ntrout",
|
||||
"<blockquote>evil</blockquote>\n\n<p>trout</p>",
|
||||
"<blockquote>evil</blockquote>\ntrout",
|
||||
"it doesn't insert <br> after blockquotes");
|
||||
|
||||
assert.cooked("leading<blockquote>evil</blockquote>\ntrout",
|
||||
"leading<blockquote>evil</blockquote>\n\n<p>trout</p>",
|
||||
"<p>leading<blockquote>evil</blockquote><br>\ntrout</p>",
|
||||
"it doesn't insert <br> after blockquotes with leading text");
|
||||
});
|
||||
|
||||
|
@ -114,10 +125,6 @@ QUnit.test("Links", assert => {
|
|||
'<p>Derpy: <a href="http://derp.com?__test=1">http://derp.com?__test=1</a></p>',
|
||||
"works with double underscores in urls");
|
||||
|
||||
assert.cooked("Derpy: http://derp.com?_test_=1",
|
||||
'<p>Derpy: <a href="http://derp.com?_test_=1">http://derp.com?_test_=1</a></p>',
|
||||
"works with underscores in urls");
|
||||
|
||||
assert.cooked("Atwood: www.codinghorror.com",
|
||||
'<p>Atwood: <a href="http://www.codinghorror.com">www.codinghorror.com</a></p>',
|
||||
"autolinks something that begins with www");
|
||||
|
@ -139,11 +146,11 @@ QUnit.test("Links", assert => {
|
|||
"autolinks a URL with parentheses (like Wikipedia)");
|
||||
|
||||
assert.cooked("Here's a tweet:\nhttps://twitter.com/evil_trout/status/345954894420787200",
|
||||
"<p>Here's a tweet:<br/><a href=\"https://twitter.com/evil_trout/status/345954894420787200\" class=\"onebox\" target=\"_blank\">https://twitter.com/evil_trout/status/345954894420787200</a></p>",
|
||||
"<p>Here's a tweet:<br>\n<a href=\"https://twitter.com/evil_trout/status/345954894420787200\" class=\"onebox\" target=\"_blank\">https://twitter.com/evil_trout/status/345954894420787200</a></p>",
|
||||
"It doesn't strip the new line.");
|
||||
|
||||
assert.cooked("1. View @eviltrout's profile here: http://meta.discourse.org/u/eviltrout/activity<br/>next line.",
|
||||
"<ol><li>View <span class=\"mention\">@eviltrout</span>'s profile here: <a href=\"http://meta.discourse.org/u/eviltrout/activity\">http://meta.discourse.org/u/eviltrout/activity</a><br>next line.</li></ol>",
|
||||
"<ol>\n<li>View <span class=\"mention\">@eviltrout</span>'s profile here: <a href=\"http://meta.discourse.org/u/eviltrout/activity\">http://meta.discourse.org/u/eviltrout/activity</a><br>next line.</li>\n</ol>",
|
||||
"allows autolinking within a list without inserting a paragraph.");
|
||||
|
||||
assert.cooked("[3]: http://eviltrout.com", "", "It doesn't autolink markdown link references");
|
||||
|
@ -158,8 +165,8 @@ QUnit.test("Links", assert => {
|
|||
"<a href=\"http://www.imdb.com/name/nm2225369\">http://www.imdb.com/name/nm2225369</a></p>",
|
||||
'allows multiple links on one line');
|
||||
|
||||
assert.cooked("* [Evil Trout][1]\n [1]: http://eviltrout.com",
|
||||
"<ul><li><a href=\"http://eviltrout.com\">Evil Trout</a></li></ul>",
|
||||
assert.cooked("* [Evil Trout][1]\n\n[1]: http://eviltrout.com",
|
||||
"<ul>\n<li><a href=\"http://eviltrout.com\">Evil Trout</a></li>\n</ul>",
|
||||
"allows markdown link references in a list");
|
||||
|
||||
assert.cooked("User [MOD]: Hello!",
|
||||
|
@ -178,7 +185,7 @@ QUnit.test("Links", assert => {
|
|||
|
||||
|
||||
assert.cooked("[Link](http://www.example.com) (with an outer \"description\")",
|
||||
"<p><a href=\"http://www.example.com\">Link</a> (with an outer \"description\")</p>",
|
||||
"<p><a href=\"http://www.example.com\">Link</a> (with an outer "description")</p>",
|
||||
"it doesn't consume closing parens as part of the url");
|
||||
|
||||
assert.cooked("A link inside parentheses (http://www.example.com)",
|
||||
|
@ -191,50 +198,76 @@ QUnit.test("Links", assert => {
|
|||
});
|
||||
|
||||
QUnit.test("simple quotes", assert => {
|
||||
assert.cooked("> nice!", "<blockquote><p>nice!</p></blockquote>", "it supports simple quotes");
|
||||
assert.cooked(" > nice!", "<blockquote><p>nice!</p></blockquote>", "it allows quotes with preceding spaces");
|
||||
assert.cooked("> nice!", "<blockquote>\n<p>nice!</p>\n</blockquote>", "it supports simple quotes");
|
||||
assert.cooked(" > nice!", "<blockquote>\n<p>nice!</p>\n</blockquote>", "it allows quotes with preceding spaces");
|
||||
assert.cooked("> level 1\n> > level 2",
|
||||
"<blockquote><p>level 1</p><blockquote><p>level 2</p></blockquote></blockquote>",
|
||||
"<blockquote>\n<p>level 1</p>\n<blockquote>\n<p>level 2</p>\n</blockquote>\n</blockquote>",
|
||||
"it allows nesting of blockquotes");
|
||||
assert.cooked("> level 1\n> > level 2",
|
||||
"<blockquote><p>level 1</p><blockquote><p>level 2</p></blockquote></blockquote>",
|
||||
"<blockquote>\n<p>level 1</p>\n<blockquote>\n<p>level 2</p>\n</blockquote>\n</blockquote>",
|
||||
"it allows nesting of blockquotes with spaces");
|
||||
|
||||
assert.cooked("- hello\n\n > world\n > eviltrout",
|
||||
"<ul><li>hello</li></ul>\n\n<blockquote><p>world<br/>eviltrout</p></blockquote>",
|
||||
`<ul>
|
||||
<li>
|
||||
<p>hello</p>
|
||||
<blockquote>
|
||||
<p>world<br>
|
||||
eviltrout</p>
|
||||
</blockquote>
|
||||
</li>
|
||||
</ul>`,
|
||||
"it allows quotes within a list.");
|
||||
|
||||
assert.cooked("- <p>eviltrout</p>",
|
||||
"<ul><li><p>eviltrout</p></li></ul>",
|
||||
"<ul>\n<li>\n<p>eviltrout</p></li>\n</ul>",
|
||||
"it allows paragraphs within a list.");
|
||||
|
||||
|
||||
assert.cooked(" > indent 1\n > indent 2", "<blockquote><p>indent 1<br/>indent 2</p></blockquote>", "allow multiple spaces to indent");
|
||||
assert.cooked(" > indent 1\n > indent 2", "<blockquote>\n<p>indent 1<br>\nindent 2</p>\n</blockquote>", "allow multiple spaces to indent");
|
||||
|
||||
});
|
||||
|
||||
QUnit.test("Quotes", assert => {
|
||||
|
||||
assert.cookedOptions("[quote=\"eviltrout, post: 1\"]\na quote\n\nsecond line\n\nthird line[/quote]",
|
||||
assert.cookedOptions("[quote=\"eviltrout, post: 1\"]\na quote\n\nsecond line\n\nthird line\n[/quote]",
|
||||
{ topicId: 2 },
|
||||
"<aside class=\"quote\" data-post=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>eviltrout:</div><blockquote>" +
|
||||
"<p>a quote</p><p>second line</p><p>third line</p></blockquote></aside>",
|
||||
`<aside class=\"quote\" data-post=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
eviltrout:</div>
|
||||
<blockquote>
|
||||
<p>a quote</p>
|
||||
<p>second line</p>
|
||||
<p>third line</p>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"works with multiple lines");
|
||||
|
||||
assert.cookedOptions("1[quote=\"bob, post:1\"]my quote[/quote]2",
|
||||
{ topicId: 2, lookupAvatar: function(name) { return "" + name; }, sanitize: true },
|
||||
"<p>1</p>\n\n<aside class=\"quote\" data-post=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>bob" +
|
||||
"bob:</div><blockquote><p>my quote</p></blockquote></aside>\n\n<p>2</p>",
|
||||
"handles quotes properly");
|
||||
|
||||
assert.cookedOptions("1[quote=\"bob, post:1\"]my quote[/quote]2",
|
||||
assert.cookedOptions("[quote=\"bob, post:1\"]\nmy quote\n[/quote]",
|
||||
{ topicId: 2, lookupAvatar: function() { } },
|
||||
"<p>1</p>\n\n<aside class=\"quote\" data-post=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>bob:" +
|
||||
"</div><blockquote><p>my quote</p></blockquote></aside>\n\n<p>2</p>",
|
||||
`<aside class=\"quote\" data-post=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
bob:</div>
|
||||
<blockquote>
|
||||
<p>my quote</p>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"includes no avatar if none is found");
|
||||
|
||||
assert.cooked(`[quote]\na\n\n[quote]\nb\n[/quote]\n[/quote]`,
|
||||
"<p><aside class=\"quote\"><blockquote><p>a</p><p><aside class=\"quote\"><blockquote><p>b</p></blockquote></aside></p></blockquote></aside></p>",
|
||||
`<aside class=\"quote\">
|
||||
<blockquote>
|
||||
<p>a</p>
|
||||
<aside class=\"quote\">
|
||||
<blockquote>
|
||||
<p>b</p>
|
||||
</blockquote>
|
||||
</aside>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"handles nested quotes properly");
|
||||
|
||||
});
|
||||
|
@ -264,7 +297,7 @@ QUnit.test("Mentions", assert => {
|
|||
"won't add mention class to an email address");
|
||||
|
||||
assert.cooked("hanzo55@yahoo.com",
|
||||
"<p>hanzo55@yahoo.com</p>",
|
||||
"<p><a href=\"mailto:hanzo55@yahoo.com\">hanzo55@yahoo.com</a></p>",
|
||||
"won't be affected by email addresses that have a number before the @ symbol");
|
||||
|
||||
assert.cooked("@EvilTrout yo",
|
||||
|
@ -272,7 +305,7 @@ QUnit.test("Mentions", assert => {
|
|||
"it handles mentions at the beginning of a string");
|
||||
|
||||
assert.cooked("yo\n@EvilTrout",
|
||||
"<p>yo<br/><span class=\"mention\">@EvilTrout</span></p>",
|
||||
"<p>yo<br>\n<span class=\"mention\">@EvilTrout</span></p>",
|
||||
"it handles mentions at the beginning of a new line");
|
||||
|
||||
assert.cooked("`evil` @EvilTrout `trout`",
|
||||
|
@ -280,15 +313,15 @@ QUnit.test("Mentions", assert => {
|
|||
"deals correctly with multiple <code> blocks");
|
||||
|
||||
assert.cooked("```\na @test\n```",
|
||||
"<p><pre><code class=\"lang-auto\">a @test</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\">a @test\n</code></pre>",
|
||||
"should not do mentions within a code block.");
|
||||
|
||||
assert.cooked("> foo bar baz @eviltrout",
|
||||
"<blockquote><p>foo bar baz <span class=\"mention\">@eviltrout</span></p></blockquote>",
|
||||
"<blockquote>\n<p>foo bar baz <span class=\"mention\">@eviltrout</span></p>\n</blockquote>",
|
||||
"handles mentions in simple quotes");
|
||||
|
||||
assert.cooked("> foo bar baz @eviltrout ohmagerd\nlook at this",
|
||||
"<blockquote><p>foo bar baz <span class=\"mention\">@eviltrout</span> ohmagerd<br/>look at this</p></blockquote>",
|
||||
"<blockquote>\n<p>foo bar baz <span class=\"mention\">@eviltrout</span> ohmagerd<br>\nlook at this</p>\n</blockquote>",
|
||||
"does mentions properly with trailing text within a simple quote");
|
||||
|
||||
assert.cooked("`code` is okay before @mention",
|
||||
|
@ -312,7 +345,7 @@ QUnit.test("Mentions", assert => {
|
|||
"you can have a mention in an inline code block following a real mention.");
|
||||
|
||||
assert.cooked("1. this is a list\n\n2. this is an @eviltrout mention\n",
|
||||
"<ol><li><p>this is a list</p></li><li><p>this is an <span class=\"mention\">@eviltrout</span> mention</p></li></ol>",
|
||||
"<ol>\n<li>\n<p>this is a list</p>\n</li>\n<li>\n<p>this is an <span class=\"mention\">@eviltrout</span> mention</p>\n</li>\n</ol>",
|
||||
"it mentions properly in a list.");
|
||||
|
||||
assert.cooked("Hello @foo/@bar",
|
||||
|
@ -344,11 +377,11 @@ QUnit.test("Category hashtags", assert => {
|
|||
"it does not translate category hashtag within links");
|
||||
|
||||
assert.cooked("```\n# #category-hashtag\n```",
|
||||
"<p><pre><code class=\"lang-auto\"># #category-hashtag</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\"># #category-hashtag\n</code></pre>",
|
||||
"it does not translate category hashtags to links in code blocks");
|
||||
|
||||
assert.cooked("># #category-hashtag\n",
|
||||
"<blockquote><h1><span class=\"hashtag\">#category-hashtag</span></h1></blockquote>",
|
||||
"<blockquote>\n<h1><span class=\"hashtag\">#category-hashtag</span></h1>\n</blockquote>",
|
||||
"it handles category hashtags in simple quotes");
|
||||
|
||||
assert.cooked("# #category-hashtag",
|
||||
|
@ -359,10 +392,6 @@ QUnit.test("Category hashtags", assert => {
|
|||
"<p>don't <code>#category-hashtag</code></p>",
|
||||
"it does not mention in an inline code block");
|
||||
|
||||
assert.cooked("test #hashtag1/#hashtag2",
|
||||
"<p>test <span class=\"hashtag\">#hashtag1</span>/#hashtag2</p>",
|
||||
"it does not convert category hashtag not bounded by spaces");
|
||||
|
||||
assert.cooked("<small>#category-hashtag</small>",
|
||||
"<p><small><span class=\"hashtag\">#category-hashtag</span></small></p>",
|
||||
"it works between HTML tags");
|
||||
|
@ -374,14 +403,12 @@ QUnit.test("Heading", assert => {
|
|||
});
|
||||
|
||||
QUnit.test("bold and italics", assert => {
|
||||
assert.cooked("a \"**hello**\"", "<p>a \"<strong>hello</strong>\"</p>", "bolds in quotes");
|
||||
assert.cooked("a \"**hello**\"", "<p>a "<strong>hello</strong>"</p>", "bolds in quotes");
|
||||
assert.cooked("(**hello**)", "<p>(<strong>hello</strong>)</p>", "bolds in parens");
|
||||
assert.cooked("**hello**\nworld", "<p><strong>hello</strong><br>world</p>", "allows newline after bold");
|
||||
assert.cooked("**hello**\n**world**", "<p><strong>hello</strong><br><strong>world</strong></p>", "newline between two bolds");
|
||||
assert.cooked("**a*_b**", "<p><strong>a*_b</strong></p>", "allows for characters within bold");
|
||||
assert.cooked("**hello**\nworld", "<p><strong>hello</strong><br>\nworld</p>", "allows newline after bold");
|
||||
assert.cooked("**hello**\n**world**", "<p><strong>hello</strong><br>\n<strong>world</strong></p>", "newline between two bolds");
|
||||
assert.cooked("** hello**", "<p>** hello**</p>", "does not bold on a space boundary");
|
||||
assert.cooked("**hello **", "<p>**hello **</p>", "does not bold on a space boundary");
|
||||
assert.cooked("你**hello**", "<p>你**hello**</p>", "does not bold chinese intra word");
|
||||
assert.cooked("**你hello**", "<p><strong>你hello</strong></p>", "allows bolded chinese");
|
||||
});
|
||||
|
||||
|
@ -391,10 +418,11 @@ QUnit.test("Escaping", assert => {
|
|||
});
|
||||
|
||||
QUnit.test("New Lines", assert => {
|
||||
// Note: This behavior was discussed and we determined it does not make sense to do this
|
||||
// unless you're using traditional line breaks
|
||||
assert.cooked("_abc\ndef_", "<p>_abc<br>def_</p>", "it does not allow markup to span new lines");
|
||||
assert.cooked("_abc\n\ndef_", "<p>_abc</p>\n\n<p>def_</p>", "it does not allow markup to span new paragraphs");
|
||||
// historically we would not continue inline em or b across lines,
|
||||
// however commonmark gives us no switch to do so and we would be very non compliant.
|
||||
// turning softbreaks into a newline is just a renderer option, not a parser switch.
|
||||
assert.cooked("_abc\ndef_", "<p><em>abc<br>\ndef</em></p>", "it does allow inlines to span new lines");
|
||||
assert.cooked("_abc\n\ndef_", "<p>_abc</p>\n<p>def_</p>", "it does not allow inlines to span new paragraphs");
|
||||
});
|
||||
|
||||
QUnit.test("Oneboxing", assert => {
|
||||
|
@ -411,9 +439,9 @@ QUnit.test("Oneboxing", assert => {
|
|||
assert.ok(!matches("http://test.com bob", /onebox/), "doesn't onebox links that have trailing text");
|
||||
|
||||
assert.ok(!matches("[Tom Cruise](http://www.tomcruise.com/)", "onebox"), "Markdown links with labels are not oneboxed");
|
||||
assert.ok(matches("[http://www.tomcruise.com/](http://www.tomcruise.com/)",
|
||||
assert.ok(!matches("[http://www.tomcruise.com/](http://www.tomcruise.com/)",
|
||||
"onebox"),
|
||||
"Markdown links where the label is the same as the url are oneboxed");
|
||||
"Markdown links where the label is the same as the url but link is explicit");
|
||||
|
||||
assert.cooked("http://en.wikipedia.org/wiki/Homicide:_Life_on_the_Street",
|
||||
"<p><a href=\"http://en.wikipedia.org/wiki/Homicide:_Life_on_the_Street\" class=\"onebox\"" +
|
||||
|
@ -431,63 +459,63 @@ QUnit.test("links with full urls", assert => {
|
|||
QUnit.test("Code Blocks", assert => {
|
||||
|
||||
assert.cooked("<pre>\nhello\n</pre>\n",
|
||||
"<p><pre>hello</pre></p>",
|
||||
"<pre>\nhello\n</pre>",
|
||||
"pre blocks don't include extra lines");
|
||||
|
||||
assert.cooked("```\na\nb\nc\n\nd\n```",
|
||||
"<p><pre><code class=\"lang-auto\">a\nb\nc\n\nd</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\">a\nb\nc\n\nd\n</code></pre>",
|
||||
"it treats new lines properly");
|
||||
|
||||
assert.cooked("```\ntest\n```",
|
||||
"<p><pre><code class=\"lang-auto\">test</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\">test\n</code></pre>",
|
||||
"it supports basic code blocks");
|
||||
|
||||
assert.cooked("```json\n{hello: 'world'}\n```\ntrailing",
|
||||
"<p><pre><code class=\"lang-json\">{hello: 'world'}</code></pre></p>\n\n<p>trailing</p>",
|
||||
"<pre><code class=\"lang-json\">{hello: 'world'}\n</code></pre>\n<p>trailing</p>",
|
||||
"It does not truncate text after a code block.");
|
||||
|
||||
assert.cooked("```json\nline 1\n\nline 2\n\n\nline3\n```",
|
||||
"<p><pre><code class=\"lang-json\">line 1\n\nline 2\n\n\nline3</code></pre></p>",
|
||||
"<pre><code class=\"lang-json\">line 1\n\nline 2\n\n\nline3\n</code></pre>",
|
||||
"it maintains new lines inside a code block.");
|
||||
|
||||
assert.cooked("hello\nworld\n```json\nline 1\n\nline 2\n\n\nline3\n```",
|
||||
"<p>hello<br/>world<br/></p>\n\n<p><pre><code class=\"lang-json\">line 1\n\nline 2\n\n\nline3</code></pre></p>",
|
||||
"<p>hello<br>\nworld</p>\n<pre><code class=\"lang-json\">line 1\n\nline 2\n\n\nline3\n</code></pre>",
|
||||
"it maintains new lines inside a code block with leading content.");
|
||||
|
||||
assert.cooked("```ruby\n<header>hello</header>\n```",
|
||||
"<p><pre><code class=\"lang-ruby\"><header>hello</header></code></pre></p>",
|
||||
"<pre><code class=\"lang-ruby\"><header>hello</header>\n</code></pre>",
|
||||
"it escapes code in the code block");
|
||||
|
||||
assert.cooked("```text\ntext\n```",
|
||||
"<p><pre><code class=\"lang-nohighlight\">text</code></pre></p>",
|
||||
"<pre><code class=\"lang-nohighlight\">text\n</code></pre>",
|
||||
"handles text by adding nohighlight");
|
||||
|
||||
assert.cooked("```ruby\n# cool\n```",
|
||||
"<p><pre><code class=\"lang-ruby\"># cool</code></pre></p>",
|
||||
"<pre><code class=\"lang-ruby\"># cool\n</code></pre>",
|
||||
"it supports changing the language");
|
||||
|
||||
assert.cooked(" ```\n hello\n ```",
|
||||
"<pre><code>```\nhello\n```</code></pre>",
|
||||
"<pre><code>```\nhello\n```</code></pre>",
|
||||
"only detect ``` at the beginning of lines");
|
||||
|
||||
assert.cooked("```ruby\ndef self.parse(text)\n\n text\nend\n```",
|
||||
"<p><pre><code class=\"lang-ruby\">def self.parse(text)\n\n text\nend</code></pre></p>",
|
||||
"<pre><code class=\"lang-ruby\">def self.parse(text)\n\n text\nend\n</code></pre>",
|
||||
"it allows leading spaces on lines in a code block.");
|
||||
|
||||
assert.cooked("```ruby\nhello `eviltrout`\n```",
|
||||
"<p><pre><code class=\"lang-ruby\">hello `eviltrout`</code></pre></p>",
|
||||
"<pre><code class=\"lang-ruby\">hello `eviltrout`\n</code></pre>",
|
||||
"it allows code with backticks in it");
|
||||
|
||||
assert.cooked("```eviltrout\nhello\n```",
|
||||
"<p><pre><code class=\"lang-auto\">hello</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\">hello\n</code></pre>",
|
||||
"it doesn't not whitelist all classes");
|
||||
|
||||
assert.cooked("```\n[quote=\"sam, post:1, topic:9441, full:true\"]This is `<not>` a bug.[/quote]\n```",
|
||||
"<p><pre><code class=\"lang-auto\">[quote="sam, post:1, topic:9441, full:true"]This is `<not>` a bug.[/quote]</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\">[quote="sam, post:1, topic:9441, full:true"]This is `<not>` a bug.[/quote]\n</code></pre>",
|
||||
"it allows code with backticks in it");
|
||||
|
||||
assert.cooked(" hello\n<blockquote>test</blockquote>",
|
||||
"<pre><code>hello</code></pre>\n\n<blockquote>test</blockquote>",
|
||||
"<pre><code>hello\n</code></pre>\n<blockquote>test</blockquote>",
|
||||
"it allows an indented code block to by followed by a `<blockquote>`");
|
||||
|
||||
assert.cooked("``` foo bar ```",
|
||||
|
@ -495,7 +523,7 @@ QUnit.test("Code Blocks", assert => {
|
|||
"it tolerates misuse of code block tags as inline code");
|
||||
|
||||
assert.cooked("```\nline1\n```\n```\nline2\n\nline3\n```",
|
||||
"<p><pre><code class=\"lang-auto\">line1</code></pre></p>\n\n<p><pre><code class=\"lang-auto\">line2\n\nline3</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\">line1\n</code></pre>\n<pre><code class=\"lang-auto\">line2\n\nline3\n</code></pre>",
|
||||
"it does not consume next block's trailing newlines");
|
||||
|
||||
assert.cooked(" <pre>test</pre>",
|
||||
|
@ -507,22 +535,22 @@ QUnit.test("Code Blocks", assert => {
|
|||
"it does not parse other block types in markdown code blocks");
|
||||
|
||||
assert.cooked("## a\nb\n```\nc\n```",
|
||||
"<h2>a</h2>\n\n<p><pre><code class=\"lang-auto\">c</code></pre></p>",
|
||||
"<h2>a</h2>\n<p>b</p>\n<pre><code class=\"lang-auto\">c\n</code></pre>",
|
||||
"it handles headings with code blocks after them.");
|
||||
});
|
||||
|
||||
QUnit.test("URLs in BBCode tags", assert => {
|
||||
|
||||
assert.cooked("[img]http://eviltrout.com/eviltrout.png[/img][img]http://samsaffron.com/samsaffron.png[/img]",
|
||||
"<p><img src=\"http://eviltrout.com/eviltrout.png\"/><img src=\"http://samsaffron.com/samsaffron.png\"/></p>",
|
||||
"<p><img src=\"http://eviltrout.com/eviltrout.png\" alt/><img src=\"http://samsaffron.com/samsaffron.png\" alt/></p>",
|
||||
"images are properly parsed");
|
||||
|
||||
assert.cooked("[url]http://discourse.org[/url]",
|
||||
"<p><a href=\"http://discourse.org\">http://discourse.org</a></p>",
|
||||
"<p><a href=\"http://discourse.org\" data-bbcode=\"true\">http://discourse.org</a></p>",
|
||||
"links are properly parsed");
|
||||
|
||||
assert.cooked("[url=http://discourse.org]discourse[/url]",
|
||||
"<p><a href=\"http://discourse.org\">discourse</a></p>",
|
||||
"<p><a href=\"http://discourse.org\" data-bbcode=\"true\">discourse</a></p>",
|
||||
"named links are properly parsed");
|
||||
|
||||
});
|
||||
|
@ -533,39 +561,39 @@ QUnit.test("images", assert => {
|
|||
"It allows images with links around them");
|
||||
|
||||
assert.cooked("<img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==\" alt=\"Red dot\">",
|
||||
"<p><img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==\" alt=\"Red dot\"></p>",
|
||||
"<p>\n<img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==\" alt=\"Red dot\"></p>",
|
||||
"It allows data images");
|
||||
});
|
||||
|
||||
QUnit.test("censoring", assert => {
|
||||
assert.cooked("aw shucks, golly gee whiz.",
|
||||
"<p>aw ■■■■■■, golly gee ■■■■.</p>",
|
||||
"<p>aw ■■■■■■, golly gee ■■■■.</p>",
|
||||
"it censors words in the Site Settings");
|
||||
|
||||
assert.cooked("you are a whizzard! I love cheesewhiz. Whiz.",
|
||||
"<p>you are a whizzard! I love cheesewhiz. ■■■■.</p>",
|
||||
"<p>you are a whizzard! I love cheesewhiz. ■■■■.</p>",
|
||||
"it doesn't censor words unless they have boundaries.");
|
||||
|
||||
assert.cooked("you are a whizzer! I love cheesewhiz. Whiz.",
|
||||
"<p>you are a ■■■■■■■! I love cheesewhiz. ■■■■.</p>",
|
||||
"<p>you are a ■■■■■■■! I love cheesewhiz. ■■■■.</p>",
|
||||
"it censors words even if previous partial matches exist.");
|
||||
|
||||
assert.cooked("The link still works. [whiz](http://www.whiz.com)",
|
||||
"<p>The link still works. <a href=\"http://www.whiz.com\">■■■■</a></p>",
|
||||
"<p>The link still works. <a href=\"http://www.whiz.com\">■■■■</a></p>",
|
||||
"it won't break links by censoring them.");
|
||||
|
||||
assert.cooked("Call techapj the computer whiz at 555-555-1234 for free help.",
|
||||
"<p>Call ■■■■■■■ the computer ■■■■ at 555-■■■■■■■■ for free help.</p>",
|
||||
"<p>Call ■■■■■■■ the computer ■■■■ at 555-■■■■■■■■ for free help.</p>",
|
||||
"uses both censored words and patterns from site settings");
|
||||
|
||||
assert.cooked("I have a pen, I have an a**le",
|
||||
"<p>I have a pen, I have an ■■■■■</p>",
|
||||
"<p>I have a pen, I have an ■■■■■</p>",
|
||||
"it escapes regexp chars");
|
||||
});
|
||||
|
||||
QUnit.test("code blocks/spans hoisting", assert => {
|
||||
assert.cooked("```\n\n some code\n```",
|
||||
"<p><pre><code class=\"lang-auto\"> some code</code></pre></p>",
|
||||
"<pre><code class=\"lang-auto\">\n some code\n</code></pre>",
|
||||
"it works when nesting standard markdown code blocks within a fenced code block");
|
||||
|
||||
assert.cooked("`$&`",
|
||||
|
@ -578,47 +606,42 @@ QUnit.test('basic bbcode', assert => {
|
|||
assert.cookedPara("[i]emphasis[/i]", "<span class=\"bbcode-i\">emphasis</span>", "italics text");
|
||||
assert.cookedPara("[u]underlined[/u]", "<span class=\"bbcode-u\">underlined</span>", "underlines text");
|
||||
assert.cookedPara("[s]strikethrough[/s]", "<span class=\"bbcode-s\">strikethrough</span>", "strikes-through text");
|
||||
assert.cookedPara("[img]http://eviltrout.com/eviltrout.png[/img]", "<img src=\"http://eviltrout.com/eviltrout.png\">", "links images");
|
||||
assert.cookedPara("[email]eviltrout@mailinator.com[/email]", "<a href=\"mailto:eviltrout@mailinator.com\">eviltrout@mailinator.com</a>", "supports [email] without a title");
|
||||
assert.cookedPara("[img]http://eviltrout.com/eviltrout.png[/img]", "<img src=\"http://eviltrout.com/eviltrout.png\" alt>", "links images");
|
||||
assert.cookedPara("[email]eviltrout@mailinator.com[/email]", "<a href=\"mailto:eviltrout@mailinator.com\" data-bbcode=\"true\">eviltrout@mailinator.com</a>", "supports [email] without a title");
|
||||
assert.cookedPara("[b]evil [i]trout[/i][/b]",
|
||||
"<span class=\"bbcode-b\">evil <span class=\"bbcode-i\">trout</span></span>",
|
||||
"allows embedding of tags");
|
||||
assert.cookedPara("[EMAIL]eviltrout@mailinator.com[/EMAIL]", "<a href=\"mailto:eviltrout@mailinator.com\">eviltrout@mailinator.com</a>", "supports upper case bbcode");
|
||||
assert.cookedPara("[EMAIL]eviltrout@mailinator.com[/EMAIL]", "<a href=\"mailto:eviltrout@mailinator.com\" data-bbcode=\"true\">eviltrout@mailinator.com</a>", "supports upper case bbcode");
|
||||
assert.cookedPara("[b]strong [b]stronger[/b][/b]", "<span class=\"bbcode-b\">strong <span class=\"bbcode-b\">stronger</span></span>", "accepts nested bbcode tags");
|
||||
});
|
||||
|
||||
QUnit.test('urls', assert => {
|
||||
assert.cookedPara("[url]not a url[/url]", "not a url", "supports [url] that isn't a url");
|
||||
assert.cookedPara("[url]abc.com[/url]", "abc.com", "no error when a url has no protocol and begins with a");
|
||||
assert.cookedPara("[url]http://bettercallsaul.com[/url]", "<a href=\"http://bettercallsaul.com\">http://bettercallsaul.com</a>", "supports [url] without parameter");
|
||||
assert.cookedPara("[url=http://example.com]example[/url]", "<a href=\"http://example.com\">example</a>", "supports [url] with given href");
|
||||
assert.cookedPara("[url]abc.com[/url]", "<a href=\"http://abc.com\">abc.com</a>", "it magically links using linkify");
|
||||
assert.cookedPara("[url]http://bettercallsaul.com[/url]", "<a href=\"http://bettercallsaul.com\" data-bbcode=\"true\">http://bettercallsaul.com</a>", "supports [url] without parameter");
|
||||
assert.cookedPara("[url=http://example.com]example[/url]", "<a href=\"http://example.com\" data-bbcode=\"true\">example</a>", "supports [url] with given href");
|
||||
assert.cookedPara("[url=http://www.example.com][img]http://example.com/logo.png[/img][/url]",
|
||||
"<a href=\"http://www.example.com\"><img src=\"http://example.com/logo.png\"></a>",
|
||||
"<a href=\"http://www.example.com\" data-bbcode=\"true\"><img src=\"http://example.com/logo.png\" alt></a>",
|
||||
"supports [url] with an embedded [img]");
|
||||
});
|
||||
QUnit.test('invalid bbcode', assert => {
|
||||
const result = new PrettyText({ lookupAvatar: false }).cook("[code]I am not closed\n\nThis text exists.");
|
||||
assert.equal(result, "<p>[code]I am not closed</p>\n\n<p>This text exists.</p>", "does not raise an error with an open bbcode tag.");
|
||||
assert.cooked("[code]I am not closed\n\nThis text exists.",
|
||||
"<p>[code]I am not closed</p>\n<p>This text exists.</p>",
|
||||
"does not raise an error with an open bbcode tag.");
|
||||
});
|
||||
|
||||
QUnit.test('code', assert => {
|
||||
assert.cookedPara("[code]\nx++\n[/code]", "<pre><code class=\"lang-auto\">x++</code></pre>", "makes code into pre");
|
||||
assert.cookedPara("[code]\nx++\ny++\nz++\n[/code]", "<pre><code class=\"lang-auto\">x++\ny++\nz++</code></pre>", "makes code into pre");
|
||||
assert.cookedPara("[code]abc\n#def\n[/code]", '<pre><code class=\"lang-auto\">abc\n#def</code></pre>', 'it handles headings in a [code] block');
|
||||
assert.cookedPara("[code]\n s[/code]",
|
||||
assert.cooked("[code]\nx++\n[/code]", "<pre><code class=\"lang-auto\">x++</code></pre>", "makes code into pre");
|
||||
assert.cooked("[code]\nx++\ny++\nz++\n[/code]", "<pre><code class=\"lang-auto\">x++\ny++\nz++</code></pre>", "makes code into pre");
|
||||
assert.cooked("[code]\nabc\n#def\n[/code]", '<pre><code class=\"lang-auto\">abc\n#def</code></pre>', 'it handles headings in a [code] block');
|
||||
assert.cooked("[code]\n s\n[/code]",
|
||||
"<pre><code class=\"lang-auto\"> s</code></pre>",
|
||||
"it doesn't trim leading whitespace");
|
||||
});
|
||||
|
||||
QUnit.test('lists', assert => {
|
||||
assert.cookedPara("[ul][li]option one[/li][/ul]", "<ul><li>option one</li></ul>", "creates an ul");
|
||||
assert.cookedPara("[ol][li]option one[/li][/ol]", "<ol><li>option one</li></ol>", "creates an ol");
|
||||
assert.cookedPara("[ul]\n[li]option one[/li]\n[li]option two[/li]\n[/ul]", "<ul><li>option one</li><li>option two</li></ul>", "suppresses empty lines in lists");
|
||||
});
|
||||
|
||||
QUnit.test('tags with arguments', assert => {
|
||||
assert.cookedPara("[url=http://bettercallsaul.com]better call![/url]", "<a href=\"http://bettercallsaul.com\">better call!</a>", "supports [url] with a title");
|
||||
assert.cookedPara("[email=eviltrout@mailinator.com]evil trout[/email]", "<a href=\"mailto:eviltrout@mailinator.com\">evil trout</a>", "supports [email] with a title");
|
||||
assert.cookedPara("[url=http://bettercallsaul.com]better call![/url]", "<a href=\"http://bettercallsaul.com\" data-bbcode=\"true\">better call!</a>", "supports [url] with a title");
|
||||
assert.cookedPara("[email=eviltrout@mailinator.com]evil trout[/email]", "<a href=\"mailto:eviltrout@mailinator.com\" data-bbcode=\"true\">evil trout</a>", "supports [email] with a title");
|
||||
assert.cookedPara("[u][i]abc[/i][/u]", "<span class=\"bbcode-u\"><span class=\"bbcode-i\">abc</span></span>", "can nest tags");
|
||||
assert.cookedPara("[b]first[/b] [b]second[/b]", "<span class=\"bbcode-b\">first</span> <span class=\"bbcode-b\">second</span>", "can bold two things on the same line");
|
||||
});
|
||||
|
@ -658,70 +681,140 @@ QUnit.test("quotes", assert => {
|
|||
"[quote=\"eviltrout, post:1, topic:2\"]\nthis is <not> a bug\n[/quote]\n\n",
|
||||
"it escapes the contents of the quote");
|
||||
|
||||
assert.cookedPara("[quote]test[/quote]",
|
||||
"<aside class=\"quote\"><blockquote><p>test</p></blockquote></aside>",
|
||||
assert.cooked("[quote]\ntest\n[/quote]",
|
||||
"<aside class=\"quote\">\n<blockquote>\n<p>test</p>\n</blockquote>\n</aside>",
|
||||
"it supports quotes without params");
|
||||
|
||||
assert.cookedPara("[quote]\n*test*\n[/quote]",
|
||||
"<aside class=\"quote\"><blockquote><p><em>test</em></p></blockquote></aside>",
|
||||
assert.cooked("[quote]\n*test*\n[/quote]",
|
||||
"<aside class=\"quote\">\n<blockquote>\n<p><em>test</em></p>\n</blockquote>\n</aside>",
|
||||
"it doesn't insert a new line for italics");
|
||||
|
||||
assert.cookedPara("[quote=,script='a'><script>alert('test');//':a][/quote]",
|
||||
"<aside class=\"quote\"><blockquote></blockquote></aside>",
|
||||
assert.cooked("[quote=,script='a'><script>alert('test');//':a]\n[/quote]",
|
||||
"<aside class=\"quote\">\n<blockquote></blockquote>\n</aside>",
|
||||
"It will not create a script tag within an attribute");
|
||||
});
|
||||
|
||||
QUnit.test("quote formatting", assert => {
|
||||
|
||||
assert.cooked("[quote=\"EvilTrout, post:123, topic:456, full:true\"][sam][/quote]",
|
||||
"<aside class=\"quote\" data-post=\"123\" data-topic=\"456\" data-full=\"true\"><div class=\"title\">" +
|
||||
"<div class=\"quote-controls\"></div>EvilTrout:</div><blockquote><p>[sam]</p></blockquote></aside>",
|
||||
assert.cooked("[quote=\"EvilTrout, post:123, topic:456, full:true\"]\n[sam]\n[/quote]",
|
||||
`<aside class=\"quote\" data-post=\"123\" data-topic=\"456\" data-full=\"true\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
EvilTrout:</div>
|
||||
<blockquote>
|
||||
<p>[sam]</p>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"it allows quotes with [] inside");
|
||||
|
||||
assert.cooked("[quote=\"eviltrout, post:1, topic:1\"]abc[/quote]",
|
||||
"<aside class=\"quote\" data-post=\"1\" data-topic=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>eviltrout:" +
|
||||
"</div><blockquote><p>abc</p></blockquote></aside>",
|
||||
assert.cooked("[quote=\"eviltrout, post:1, topic:1\"]\nabc\n[/quote]",
|
||||
`<aside class=\"quote\" data-post=\"1\" data-topic=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
eviltrout:</div>
|
||||
<blockquote>
|
||||
<p>abc</p>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"renders quotes properly");
|
||||
|
||||
assert.cooked("[quote=\"eviltrout, post:1, topic:1\"]abc[/quote]\nhello",
|
||||
"<aside class=\"quote\" data-post=\"1\" data-topic=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>eviltrout:" +
|
||||
"</div><blockquote><p>abc</p></blockquote></aside>\n\n<p>hello</p>",
|
||||
assert.cooked("[quote=\"eviltrout, post:1, topic:1\"]\nabc\n[/quote]\nhello",
|
||||
`<aside class=\"quote\" data-post=\"1\" data-topic=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
eviltrout:</div>
|
||||
<blockquote>
|
||||
<p>abc</p>
|
||||
</blockquote>
|
||||
</aside>
|
||||
<p>hello</p>`,
|
||||
"handles new lines properly");
|
||||
|
||||
assert.cooked("[quote=\"Alice, post:1, topic:1\"]\n[quote=\"Bob, post:2, topic:1\"]\n[/quote]\n[/quote]",
|
||||
"<aside class=\"quote\" data-post=\"1\" data-topic=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>Alice:" +
|
||||
"</div><blockquote><aside class=\"quote\" data-post=\"2\" data-topic=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>Bob:" +
|
||||
"</div><blockquote></blockquote></aside></blockquote></aside>",
|
||||
`<aside class=\"quote\" data-post=\"1\" data-topic=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
Alice:</div>
|
||||
<blockquote>
|
||||
<aside class=\"quote\" data-post=\"2\" data-topic=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
Bob:</div>
|
||||
<blockquote></blockquote>
|
||||
</aside>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"quotes can be nested");
|
||||
|
||||
assert.cooked("[quote=\"Alice, post:1, topic:1\"]\n[quote=\"Bob, post:2, topic:1\"]\n[/quote]",
|
||||
"<aside class=\"quote\" data-post=\"1\" data-topic=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>Alice:" +
|
||||
"</div><blockquote><p>[quote=\"Bob, post:2, topic:1\"]</p></blockquote></aside>",
|
||||
"handles mismatched nested quote tags");
|
||||
`<p>[quote="Alice, post:1, topic:1"]</p>
|
||||
<aside class=\"quote\" data-post=\"2\" data-topic=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
Bob:</div>
|
||||
<blockquote></blockquote>
|
||||
</aside>`,
|
||||
|
||||
"handles mismatched nested quote tags (non greedy)");
|
||||
|
||||
assert.cooked("[quote=\"Alice, post:1, topic:1\"]\n```javascript\nvar foo ='foo';\nvar bar = 'bar';\n```\n[/quote]",
|
||||
"<aside class=\"quote\" data-post=\"1\" data-topic=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>Alice:</div><blockquote><p><pre><code class=\"lang-javascript\">var foo ='foo';\nvar bar = 'bar';</code></pre></p></blockquote></aside>",
|
||||
`<aside class=\"quote\" data-post=\"1\" data-topic=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
Alice:</div>
|
||||
<blockquote>
|
||||
<pre><code class=\"lang-javascript\">var foo ='foo';
|
||||
var bar = 'bar';
|
||||
</code></pre>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"quotes can have code blocks without leading newline");
|
||||
|
||||
assert.cooked("[quote=\"Alice, post:1, topic:1\"]\n\n```javascript\nvar foo ='foo';\nvar bar = 'bar';\n```\n[/quote]",
|
||||
"<aside class=\"quote\" data-post=\"1\" data-topic=\"1\"><div class=\"title\"><div class=\"quote-controls\"></div>Alice:</div><blockquote><p><pre><code class=\"lang-javascript\">var foo ='foo';\nvar bar = 'bar';</code></pre></p></blockquote></aside>",
|
||||
`<aside class=\"quote\" data-post=\"1\" data-topic=\"1\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
Alice:</div>
|
||||
<blockquote>
|
||||
<pre><code class=\"lang-javascript\">var foo ='foo';
|
||||
var bar = 'bar';
|
||||
</code></pre>
|
||||
</blockquote>
|
||||
</aside>`,
|
||||
"quotes can have code blocks with leading newline");
|
||||
});
|
||||
|
||||
QUnit.test("quotes with trailing formatting", assert => {
|
||||
const result = new PrettyText(defaultOpts).cook("[quote=\"EvilTrout, post:123, topic:456, full:true\"]\nhello\n[/quote]\n*Test*");
|
||||
assert.equal(result,
|
||||
"<aside class=\"quote\" data-post=\"123\" data-topic=\"456\" data-full=\"true\"><div class=\"title\">" +
|
||||
"<div class=\"quote-controls\"></div>EvilTrout:</div><blockquote><p>hello</p></blockquote></aside>\n\n<p><em>Test</em></p>",
|
||||
`<aside class=\"quote\" data-post=\"123\" data-topic=\"456\" data-full=\"true\">
|
||||
<div class=\"title\">
|
||||
<div class=\"quote-controls\"></div>
|
||||
EvilTrout:</div>
|
||||
<blockquote>
|
||||
<p>hello</p>
|
||||
</blockquote>
|
||||
</aside>
|
||||
<p><em>Test</em></p>`,
|
||||
"it allows trailing formatting");
|
||||
});
|
||||
|
||||
QUnit.test("enable/disable features", assert => {
|
||||
const table = `<table><tr><th>hello</th></tr><tr><td>world</td></tr></table>`;
|
||||
const hasTable = new PrettyText({ features: {table: true}, sanitize: true}).cook(table);
|
||||
assert.equal(hasTable, `<table class="md-table"><tr><th>hello</th></tr><tr><td>world</td></tr></table>`);
|
||||
|
||||
const noTable = new PrettyText({ features: { table: false }, sanitize: true}).cook(table);
|
||||
assert.equal(noTable, `<p></p>`, 'tables are stripped when disabled');
|
||||
assert.cookedOptions('|a|\n--\n|a|', { features: {table: false} }, '');
|
||||
assert.cooked('|a|\n--\n|a|',
|
||||
`<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>a</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>a</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>`);
|
||||
});
|
||||
|
||||
QUnit.test("emoji", assert => {
|
||||
|
@ -732,6 +825,6 @@ QUnit.test("emoji", assert => {
|
|||
|
||||
QUnit.test("emoji - emojiSet", assert => {
|
||||
assert.cookedOptions(":smile:",
|
||||
{ emojiSet: 'twitter' },
|
||||
{ siteSettings : { emoji_set: 'twitter' }},
|
||||
`<p><img src="/images/emoji/twitter/smile.png?v=${v}" title=":smile:" class="emoji" alt=":smile:"></p>`);
|
||||
});
|
||||
|
|
|
@ -12,15 +12,15 @@ QUnit.test("sanitize", assert => {
|
|||
assert.equal(pt.sanitize("<div><p class=\"funky\" wrong='1'>hello</p></div>"), "<div><p>hello</p></div>");
|
||||
assert.equal(pt.sanitize("<3 <3"), "<3 <3");
|
||||
assert.equal(pt.sanitize("<_<"), "<_<");
|
||||
|
||||
cooked("hello<script>alert(42)</script>", "<p>hello</p>", "it sanitizes while cooking");
|
||||
|
||||
cooked("<a href='http://disneyland.disney.go.com/'>disney</a> <a href='http://reddit.com'>reddit</a>",
|
||||
"<p><a href=\"http://disneyland.disney.go.com/\">disney</a> <a href=\"http://reddit.com\">reddit</a></p>",
|
||||
"we can embed proper links");
|
||||
|
||||
cooked("<center>hello</center>", "<p>hello</p>", "it does not allow centering");
|
||||
cooked("<table><tr><td>hello</td></tr></table>\nafter", "<p>after</p>", "it does not allow tables");
|
||||
cooked("<blockquote>a\n</blockquote>\n", "<blockquote>a\n\n<br/>\n\n</blockquote>", "it does not double sanitize");
|
||||
cooked("<center>hello</center>", "hello", "it does not allow centering");
|
||||
cooked("<blockquote>a\n</blockquote>\n", "<blockquote>a\n</blockquote>", "it does not double sanitize");
|
||||
|
||||
cooked("<iframe src=\"http://discourse.org\" width=\"100\" height=\"42\"></iframe>", "", "it does not allow most iframes");
|
||||
|
||||
|
@ -38,9 +38,9 @@ QUnit.test("sanitize", assert => {
|
|||
assert.equal(pt.sanitize("<progress>hello"), "hello");
|
||||
assert.equal(pt.sanitize("<mark>highlight</mark>"), "highlight");
|
||||
|
||||
cooked("[the answer](javascript:alert(42))", "<p><a>the answer</a></p>", "it prevents XSS");
|
||||
cooked("[the answer](javascript:alert(42))", "<p>[the answer](javascript:alert(42))</p>", "it prevents XSS");
|
||||
|
||||
cooked("<i class=\"fa fa-bug fa-spin\" style=\"font-size:600%\"></i>\n<!-- -->", "<p><i></i><br/></p>", "it doesn't circumvent XSS with comments");
|
||||
cooked("<i class=\"fa fa-bug fa-spin\" style=\"font-size:600%\"></i>\n<!-- -->", "<p><i></i></p>", "it doesn't circumvent XSS with comments");
|
||||
|
||||
cooked("<span class=\"-bbcode-s fa fa-spin\">a</span>", "<p><span>a</span></p>", "it sanitizes spans");
|
||||
cooked("<span class=\"fa fa-spin -bbcode-s\">a</span>", "<p><span>a</span></p>", "it sanitizes spans");
|
||||
|
|
48
test/javascripts/lib/white-lister-test.js.es6
Normal file
48
test/javascripts/lib/white-lister-test.js.es6
Normal file
|
@ -0,0 +1,48 @@
|
|||
import WhiteLister from 'pretty-text/white-lister';
|
||||
|
||||
QUnit.module("lib:whiteLister");
|
||||
|
||||
QUnit.test("whiteLister", assert => {
|
||||
const whiteLister = new WhiteLister();
|
||||
|
||||
assert.ok(Object.keys(whiteLister.getWhiteList().tagList).length > 1, "should have some defaults");
|
||||
|
||||
whiteLister.disable("default");
|
||||
|
||||
assert.ok(Object.keys(whiteLister.getWhiteList().tagList).length === 0, "should have no defaults if disabled");
|
||||
|
||||
whiteLister.whiteListFeature("test", [
|
||||
'custom.foo',
|
||||
'custom.baz',
|
||||
'custom[data-*]',
|
||||
'custom[rel=nofollow]'
|
||||
]);
|
||||
|
||||
whiteLister.whiteListFeature("test", [
|
||||
'custom[rel=test]'
|
||||
]);
|
||||
|
||||
whiteLister.enable("test");
|
||||
|
||||
assert.deepEqual(whiteLister.getWhiteList(), {
|
||||
tagList: {
|
||||
custom: []
|
||||
},
|
||||
attrList: {
|
||||
custom: {
|
||||
"class": ["foo", "baz"],
|
||||
"data-*": ["*"],
|
||||
"rel": ["nofollow", "test"]
|
||||
}
|
||||
}
|
||||
}, 'Expecting a correct white list');
|
||||
|
||||
|
||||
whiteLister.disable("test");
|
||||
|
||||
assert.deepEqual(whiteLister.getWhiteList(), {
|
||||
tagList: {},
|
||||
attrList: {}
|
||||
}, 'Expecting an empty white list');
|
||||
|
||||
});
|
|
@ -1,21 +0,0 @@
|
|||
AT&T has an ampersand in their name.
|
||||
|
||||
AT&T is another way to write it.
|
||||
|
||||
This & that.
|
||||
|
||||
4 < 5.
|
||||
|
||||
6 > 5.
|
||||
|
||||
Here's a [link] [1] with an ampersand in the URL.
|
||||
|
||||
Here's a link with an amersand in the link text: [AT&T] [2].
|
||||
|
||||
Here's an inline [link](/script?foo=1&bar=2).
|
||||
|
||||
Here's an inline [link](</script?foo=1&bar=2>).
|
||||
|
||||
|
||||
[1]: http://example.com/?foo=1&bar=2
|
||||
[2]: http://att.com/ "AT&T"
|
|
@ -1,17 +0,0 @@
|
|||
<p>AT&T has an ampersand in their name.</p>
|
||||
|
||||
<p>AT&T is another way to write it.</p>
|
||||
|
||||
<p>This & that.</p>
|
||||
|
||||
<p>4 < 5.</p>
|
||||
|
||||
<p>6 > 5.</p>
|
||||
|
||||
<p>Here's a <a href="http://example.com/?foo=1&bar=2">link</a> with an ampersand in the URL.</p>
|
||||
|
||||
<p>Here's a link with an amersand in the link text: <a href="http://att.com/" title="AT&T">AT&T</a>.</p>
|
||||
|
||||
<p>Here's an inline <a href="/script?foo=1&bar=2">link</a>.</p>
|
||||
|
||||
<p>Here's an inline <a href="/script?foo=1&bar=2">link</a>.</p>
|
|
@ -1,13 +0,0 @@
|
|||
Link: <http://example.com/>.
|
||||
|
||||
With an ampersand: <http://example.com/?foo=1&bar=2>
|
||||
|
||||
* In a list?
|
||||
* <http://example.com/>
|
||||
* It should.
|
||||
|
||||
> Blockquoted: <http://example.com/>
|
||||
|
||||
Auto-links should not occur here: `<http://example.com/>`
|
||||
|
||||
or here: <http://example.com/>
|
|
@ -1,18 +0,0 @@
|
|||
<p>Link: <a href="http://example.com/">http://example.com/</a>.</p>
|
||||
|
||||
<p>With an ampersand: <a href="http://example.com/?foo=1&bar=2">http://example.com/?foo=1&bar=2</a></p>
|
||||
|
||||
<ul>
|
||||
<li>In a list?</li>
|
||||
<li><a href="http://example.com/">http://example.com/</a></li>
|
||||
<li>It should.</li>
|
||||
</ul>
|
||||
|
||||
<blockquote>
|
||||
<p>Blockquoted: <a href="http://example.com/">http://example.com/</a></p>
|
||||
</blockquote>
|
||||
|
||||
<p>Auto-links should not occur here: <code><http://example.com/></code></p>
|
||||
|
||||
<pre><code>or here: <http://example.com/>
|
||||
</code></pre>
|
|
@ -1,120 +0,0 @@
|
|||
These should all get escaped:
|
||||
|
||||
Backslash: \\
|
||||
|
||||
Backtick: \`
|
||||
|
||||
Asterisk: \*
|
||||
|
||||
Underscore: \_
|
||||
|
||||
Left brace: \{
|
||||
|
||||
Right brace: \}
|
||||
|
||||
Left bracket: \[
|
||||
|
||||
Right bracket: \]
|
||||
|
||||
Left paren: \(
|
||||
|
||||
Right paren: \)
|
||||
|
||||
Greater-than: \>
|
||||
|
||||
Hash: \#
|
||||
|
||||
Period: \.
|
||||
|
||||
Bang: \!
|
||||
|
||||
Plus: \+
|
||||
|
||||
Minus: \-
|
||||
|
||||
|
||||
|
||||
These should not, because they occur within a code block:
|
||||
|
||||
Backslash: \\
|
||||
|
||||
Backtick: \`
|
||||
|
||||
Asterisk: \*
|
||||
|
||||
Underscore: \_
|
||||
|
||||
Left brace: \{
|
||||
|
||||
Right brace: \}
|
||||
|
||||
Left bracket: \[
|
||||
|
||||
Right bracket: \]
|
||||
|
||||
Left paren: \(
|
||||
|
||||
Right paren: \)
|
||||
|
||||
Greater-than: \>
|
||||
|
||||
Hash: \#
|
||||
|
||||
Period: \.
|
||||
|
||||
Bang: \!
|
||||
|
||||
Plus: \+
|
||||
|
||||
Minus: \-
|
||||
|
||||
|
||||
Nor should these, which occur in code spans:
|
||||
|
||||
Backslash: `\\`
|
||||
|
||||
Backtick: `\``
|
||||
|
||||
Asterisk: `\*`
|
||||
|
||||
Underscore: `\_`
|
||||
|
||||
Left brace: `\{`
|
||||
|
||||
Right brace: `\}`
|
||||
|
||||
Left bracket: `\[`
|
||||
|
||||
Right bracket: `\]`
|
||||
|
||||
Left paren: `\(`
|
||||
|
||||
Right paren: `\)`
|
||||
|
||||
Greater-than: `\>`
|
||||
|
||||
Hash: `\#`
|
||||
|
||||
Period: `\.`
|
||||
|
||||
Bang: `\!`
|
||||
|
||||
Plus: `\+`
|
||||
|
||||
Minus: `\-`
|
||||
|
||||
|
||||
These should get escaped, even though they're matching pairs for
|
||||
other Markdown constructs:
|
||||
|
||||
\*asterisks\*
|
||||
|
||||
\_underscores\_
|
||||
|
||||
\`backticks\`
|
||||
|
||||
This is a code span with a literal backslash-backtick sequence: `\``
|
||||
|
||||
This is a tag with unescaped backticks <span attr='`ticks`'>bar</span>.
|
||||
|
||||
This is a tag with backslashes <span attr='\\backslashes\\'>bar</span>.
|
|
@ -1,118 +0,0 @@
|
|||
<p>These should all get escaped:</p>
|
||||
|
||||
<p>Backslash: \</p>
|
||||
|
||||
<p>Backtick: `</p>
|
||||
|
||||
<p>Asterisk: *</p>
|
||||
|
||||
<p>Underscore: _</p>
|
||||
|
||||
<p>Left brace: {</p>
|
||||
|
||||
<p>Right brace: }</p>
|
||||
|
||||
<p>Left bracket: [</p>
|
||||
|
||||
<p>Right bracket: ]</p>
|
||||
|
||||
<p>Left paren: (</p>
|
||||
|
||||
<p>Right paren: )</p>
|
||||
|
||||
<p>Greater-than: ></p>
|
||||
|
||||
<p>Hash: #</p>
|
||||
|
||||
<p>Period: .</p>
|
||||
|
||||
<p>Bang: !</p>
|
||||
|
||||
<p>Plus: +</p>
|
||||
|
||||
<p>Minus: -</p>
|
||||
|
||||
<p>These should not, because they occur within a code block:</p>
|
||||
|
||||
<pre><code>Backslash: \\
|
||||
|
||||
Backtick: \`
|
||||
|
||||
Asterisk: \*
|
||||
|
||||
Underscore: \_
|
||||
|
||||
Left brace: \{
|
||||
|
||||
Right brace: \}
|
||||
|
||||
Left bracket: \[
|
||||
|
||||
Right bracket: \]
|
||||
|
||||
Left paren: \(
|
||||
|
||||
Right paren: \)
|
||||
|
||||
Greater-than: \>
|
||||
|
||||
Hash: \#
|
||||
|
||||
Period: \.
|
||||
|
||||
Bang: \!
|
||||
|
||||
Plus: \+
|
||||
|
||||
Minus: \-
|
||||
</code></pre>
|
||||
|
||||
<p>Nor should these, which occur in code spans:</p>
|
||||
|
||||
<p>Backslash: <code>\\</code></p>
|
||||
|
||||
<p>Backtick: <code>\`</code></p>
|
||||
|
||||
<p>Asterisk: <code>\*</code></p>
|
||||
|
||||
<p>Underscore: <code>\_</code></p>
|
||||
|
||||
<p>Left brace: <code>\{</code></p>
|
||||
|
||||
<p>Right brace: <code>\}</code></p>
|
||||
|
||||
<p>Left bracket: <code>\[</code></p>
|
||||
|
||||
<p>Right bracket: <code>\]</code></p>
|
||||
|
||||
<p>Left paren: <code>\(</code></p>
|
||||
|
||||
<p>Right paren: <code>\)</code></p>
|
||||
|
||||
<p>Greater-than: <code>\></code></p>
|
||||
|
||||
<p>Hash: <code>\#</code></p>
|
||||
|
||||
<p>Period: <code>\.</code></p>
|
||||
|
||||
<p>Bang: <code>\!</code></p>
|
||||
|
||||
<p>Plus: <code>\+</code></p>
|
||||
|
||||
<p>Minus: <code>\-</code></p>
|
||||
|
||||
|
||||
<p>These should get escaped, even though they're matching pairs for
|
||||
other Markdown constructs:</p>
|
||||
|
||||
<p>*asterisks*</p>
|
||||
|
||||
<p>_underscores_</p>
|
||||
|
||||
<p>`backticks`</p>
|
||||
|
||||
<p>This is a code span with a literal backslash-backtick sequence: <code>\`</code></p>
|
||||
|
||||
<p>This is a tag with unescaped backticks <span>bar</span>.</p>
|
||||
|
||||
<p>This is a tag with backslashes <span>bar</span>.</p>
|
|
@ -1,11 +0,0 @@
|
|||
> Example:
|
||||
>
|
||||
> sub status {
|
||||
> print "working";
|
||||
> }
|
||||
>
|
||||
> Or:
|
||||
>
|
||||
> sub status {
|
||||
> return "working";
|
||||
> }
|
|
@ -1,15 +0,0 @@
|
|||
<blockquote>
|
||||
<p>Example:</p>
|
||||
|
||||
<pre><code>sub status {
|
||||
print "working";
|
||||
}
|
||||
</code></pre>
|
||||
|
||||
<p>Or:</p>
|
||||
|
||||
<pre><code>sub status {
|
||||
return "working";
|
||||
}
|
||||
</code></pre>
|
||||
</blockquote>
|
|
@ -1,14 +0,0 @@
|
|||
code block on the first line
|
||||
|
||||
Regular text.
|
||||
|
||||
code block indented by spaces
|
||||
|
||||
Regular text.
|
||||
|
||||
the lines in this block
|
||||
all contain trailing spaces
|
||||
|
||||
Regular Text.
|
||||
|
||||
code block on the last line
|
|
@ -1,17 +0,0 @@
|
|||
<pre><code>code block on the first line
|
||||
</code></pre>
|
||||
|
||||
<p>Regular text.</p>
|
||||
|
||||
<pre><code>code block indented by spaces
|
||||
</code></pre>
|
||||
|
||||
<p>Regular text.</p>
|
||||
|
||||
<pre><code>the lines in this block
|
||||
all contain trailing spaces</code></pre>
|
||||
|
||||
<p>Regular Text.</p>
|
||||
|
||||
<pre><code>code block on the last line
|
||||
</code></pre>
|
|
@ -1,5 +0,0 @@
|
|||
`<test a="` content of attribute `">`
|
||||
|
||||
Fix for backticks within HTML tag: <span attr='`ticks`'>like this</span>
|
||||
|
||||
Here's how you put `` `backticks` `` in a code span.
|
|
@ -1,5 +0,0 @@
|
|||
<p><code><test a="</code> content of attribute <code>"></code></p>
|
||||
|
||||
<p>Fix for backticks within HTML tag: <span>like this</span></p>
|
||||
|
||||
<p>Here's how you put <code>`backticks`</code> in a code span.</p>
|
|
@ -1,8 +0,0 @@
|
|||
In Markdown 1.0.0 and earlier. Version
|
||||
8. This line turns into a list item.
|
||||
Because a hard-wrapped line in the
|
||||
middle of a paragraph looked like a
|
||||
list item.
|
||||
|
||||
Here's one with a bullet.
|
||||
* criminey.
|
|
@ -1,8 +0,0 @@
|
|||
<p>In Markdown 1.0.0 and earlier. Version
|
||||
8. This line turns into a list item.
|
||||
Because a hard-wrapped line in the
|
||||
middle of a paragraph looked like a
|
||||
list item.</p>
|
||||
|
||||
<p>Here's one with a bullet.
|
||||
* criminey.</p>
|
|
@ -1,67 +0,0 @@
|
|||
Dashes:
|
||||
|
||||
---
|
||||
|
||||
---
|
||||
|
||||
---
|
||||
|
||||
---
|
||||
|
||||
---
|
||||
|
||||
- - -
|
||||
|
||||
- - -
|
||||
|
||||
- - -
|
||||
|
||||
- - -
|
||||
|
||||
- - -
|
||||
|
||||
|
||||
Asterisks:
|
||||
|
||||
***
|
||||
|
||||
***
|
||||
|
||||
***
|
||||
|
||||
***
|
||||
|
||||
***
|
||||
|
||||
* * *
|
||||
|
||||
* * *
|
||||
|
||||
* * *
|
||||
|
||||
* * *
|
||||
|
||||
* * *
|
||||
|
||||
|
||||
Underscores:
|
||||
|
||||
___
|
||||
|
||||
___
|
||||
|
||||
___
|
||||
|
||||
___
|
||||
|
||||
___
|
||||
|
||||
_ _ _
|
||||
|
||||
_ _ _
|
||||
|
||||
_ _ _
|
||||
|
||||
_ _ _
|
||||
|
||||
_ _ _
|
|
@ -1,71 +0,0 @@
|
|||
<p>Dashes:</p>
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<pre><code>---
|
||||
</code></pre>
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<pre><code>- - -
|
||||
</code></pre>
|
||||
|
||||
<p>Asterisks:</p>
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<pre><code>***
|
||||
</code></pre>
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<pre><code>* * *
|
||||
</code></pre>
|
||||
|
||||
<p>Underscores:</p>
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<pre><code>___
|
||||
</code></pre>
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<hr />
|
||||
|
||||
<pre><code>_ _ _
|
||||
</code></pre>
|
|
@ -1,26 +0,0 @@
|
|||
![Alt text](/path/to/img.jpg)
|
||||
|
||||
![Alt text](/path/to/img.jpg "Optional title")
|
||||
|
||||
Inline within a paragraph: [alt text](/url/).
|
||||
|
||||
![alt text](/url/ "title preceded by two spaces")
|
||||
|
||||
![alt text](/url/ "title has spaces afterward" )
|
||||
|
||||
![alt text](</url/>)
|
||||
|
||||
![alt text](</url/> "with a title").
|
||||
|
||||
![Empty]()
|
||||
|
||||
![this is a stupid URL](http://example.com/(parens).jpg)
|
||||
|
||||
|
||||
![alt text][foo]
|
||||
|
||||
[foo]: /url/
|
||||
|
||||
![alt text][bar]
|
||||
|
||||
[bar]: /url/ "Title here"
|
|
@ -1,21 +0,0 @@
|
|||
<p><img src="/path/to/img.jpg" alt="Alt text" /></p>
|
||||
|
||||
<p><img src="/path/to/img.jpg" alt="Alt text" title="Optional title" /></p>
|
||||
|
||||
<p>Inline within a paragraph: <a href="/url/">alt text</a>.</p>
|
||||
|
||||
<p><img src="/url/" alt="alt text" title="title preceded by two spaces" /></p>
|
||||
|
||||
<p><img src="/url/" alt="alt text" title="title has spaces afterward" /></p>
|
||||
|
||||
<p><img src="/url/" alt="alt text" /></p>
|
||||
|
||||
<p><img src="/url/" alt="alt text" title="with a title" />.</p>
|
||||
|
||||
<p><img alt="Empty" /></p>
|
||||
|
||||
<p><img src="http://example.com/(parens).jpg" alt="this is a stupid URL" /></p>
|
||||
|
||||
<p><img src="/url/" alt="alt text" /></p>
|
||||
|
||||
<p><img src="/url/" alt="alt text" title="Title here" /></p>
|
|
@ -1,30 +0,0 @@
|
|||
Simple block on one line:
|
||||
|
||||
<div>foo</div>
|
||||
|
||||
And nested without indentation:
|
||||
|
||||
<div>
|
||||
<div>
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
<div style=">"></div>
|
||||
</div>
|
||||
<div>bar</div>
|
||||
</div>
|
||||
|
||||
And with attributes:
|
||||
|
||||
<div>
|
||||
<div id="foo">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
This was broken in 1.0.2b7:
|
||||
|
||||
<div class="inlinepage">
|
||||
<div class="toggleableend">
|
||||
foo
|
||||
</div>
|
||||
</div>
|
|
@ -1,30 +0,0 @@
|
|||
<p>Simple block on one line:</p>
|
||||
|
||||
<div>foo</div>
|
||||
|
||||
<p>And nested without indentation:</p>
|
||||
|
||||
<div>
|
||||
<div>
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
<div></div>
|
||||
</div>
|
||||
<div>bar</div>
|
||||
</div>
|
||||
|
||||
<p>And with attributes:</p>
|
||||
|
||||
<div>
|
||||
<div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p>This was broken in 1.0.2b7:</p>
|
||||
|
||||
<div>
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
</div>
|
|
@ -1,54 +0,0 @@
|
|||
Here's a simple block:
|
||||
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
|
||||
This should be a code block, though:
|
||||
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
|
||||
As should this:
|
||||
|
||||
<div>foo</div>
|
||||
|
||||
Now, nested:
|
||||
|
||||
<div>
|
||||
<div>
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
This should just be an HTML comment:
|
||||
|
||||
<!-- Comment -->
|
||||
|
||||
Multiline:
|
||||
|
||||
<!--
|
||||
Blah
|
||||
Blah
|
||||
-->
|
||||
|
||||
Code block:
|
||||
|
||||
<!-- Comment -->
|
||||
|
||||
Just plain comment, with trailing spaces on the line:
|
||||
|
||||
<!-- foo -->
|
||||
|
||||
Code:
|
||||
|
||||
<hr />
|
||||
|
||||
Hr's:
|
||||
|
||||
<hr>
|
||||
|
||||
<hr class="foo" id="bar">
|
|
@ -1,58 +0,0 @@
|
|||
<p>Here's a simple block:</p>
|
||||
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
|
||||
<p>This should be a code block, though:</p>
|
||||
|
||||
<pre><code><div>
|
||||
foo
|
||||
</div>
|
||||
</code></pre>
|
||||
|
||||
<p>As should this:</p>
|
||||
|
||||
<pre><code><div>foo</div>
|
||||
</code></pre>
|
||||
|
||||
<p>Now, nested:</p>
|
||||
|
||||
<div>
|
||||
<div>
|
||||
<div>
|
||||
foo
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p>This should just be an HTML comment:</p>
|
||||
|
||||
<!-- Comment -->
|
||||
|
||||
<p>Multiline:</p>
|
||||
|
||||
<!--
|
||||
Blah
|
||||
Blah
|
||||
-->
|
||||
|
||||
<p>Code block:</p>
|
||||
|
||||
<pre><code><!-- Comment -->
|
||||
</code></pre>
|
||||
|
||||
<p>Just plain comment, with trailing spaces on the line:</p>
|
||||
|
||||
<!-- foo -->
|
||||
|
||||
<p>Code:</p>
|
||||
|
||||
<pre><code><hr />
|
||||
</code></pre>
|
||||
|
||||
<p>Hr's:</p>
|
||||
|
||||
<hr>
|
||||
|
||||
<hr>
|
|
@ -1,13 +0,0 @@
|
|||
Paragraph one.
|
||||
|
||||
<!-- This is a simple comment -->
|
||||
|
||||
<!--
|
||||
This is another comment.
|
||||
-->
|
||||
|
||||
Paragraph two.
|
||||
|
||||
<!-- one comment block -- -- with two comments -->
|
||||
|
||||
The end.
|
|
@ -1,13 +0,0 @@
|
|||
<p>Paragraph one.</p>
|
||||
|
||||
<!-- This is a simple comment -->
|
||||
|
||||
<!--
|
||||
This is another comment.
|
||||
-->
|
||||
|
||||
<p>Paragraph two.</p>
|
||||
|
||||
<!-- one comment block -- -- with two comments -->
|
||||
|
||||
<p>The end.</p>
|
|
@ -1,24 +0,0 @@
|
|||
Just a [LINK](/url/).
|
||||
|
||||
[URL and title](/url/ "title").
|
||||
|
||||
[URL and title](/url/ "title preceded by two spaces").
|
||||
|
||||
[URL and title](/url/ "title preceded by a tab").
|
||||
|
||||
[URL and title](/url/ "title has spaces afterward" ).
|
||||
|
||||
[URL wrapped in angle brackets](</url/>).
|
||||
|
||||
[URL w/ angle brackets + title](</url/> "Here's the title").
|
||||
|
||||
[Empty]().
|
||||
|
||||
[With parens in the URL](http://en.wikipedia.org/wiki/WIMP_(computing))
|
||||
|
||||
(With outer parens and [parens in url](/foo(bar)))
|
||||
|
||||
|
||||
[With parens in the URL](/foo(bar) "and a title")
|
||||
|
||||
(With outer parens and [parens in url](/foo(bar) "and a title"))
|
|
@ -1,23 +0,0 @@
|
|||
<p>Just a <a href="/url/">LINK</a>.</p>
|
||||
|
||||
<p><a href="/url/" title="title">URL and title</a>.</p>
|
||||
|
||||
<p><a href="/url/" title="title preceded by two spaces">URL and title</a>.</p>
|
||||
|
||||
<p><a href="/url/" title="title preceded by a tab">URL and title</a>.</p>
|
||||
|
||||
<p><a href="/url/" title="title has spaces afterward">URL and title</a>.</p>
|
||||
|
||||
<p><a href="/url/">URL wrapped in angle brackets</a>.</p>
|
||||
|
||||
<p><a href="/url/" title="Here's the title">URL w/ angle brackets + title</a>.</p>
|
||||
|
||||
<p><a>Empty</a>.</p>
|
||||
|
||||
<p><a href="http://en.wikipedia.org/wiki/WIMP_(computing)">With parens in the URL</a></p>
|
||||
|
||||
<p>(With outer parens and <a href="/foo(bar)">parens in url</a>)</p>
|
||||
|
||||
<p><a href="/foo(bar)" title="and a title">With parens in the URL</a></p>
|
||||
|
||||
<p>(With outer parens and <a href="/foo(bar)" title="and a title">parens in url</a>)</p>
|
|
@ -1,71 +0,0 @@
|
|||
Foo [bar] [1].
|
||||
|
||||
Foo [bar][1].
|
||||
|
||||
Foo [bar]
|
||||
[1].
|
||||
|
||||
[1]: /url/ "Title"
|
||||
|
||||
|
||||
With [embedded [brackets]] [b].
|
||||
|
||||
|
||||
Indented [once][].
|
||||
|
||||
Indented [twice][].
|
||||
|
||||
Indented [thrice][].
|
||||
|
||||
Indented [four][] times.
|
||||
|
||||
[once]: /url
|
||||
|
||||
[twice]: /url
|
||||
|
||||
[thrice]: /url
|
||||
|
||||
[four]: /url
|
||||
|
||||
|
||||
[b]: /url/
|
||||
|
||||
* * *
|
||||
|
||||
[this] [this] should work
|
||||
|
||||
So should [this][this].
|
||||
|
||||
And [this] [].
|
||||
|
||||
And [this][].
|
||||
|
||||
And [this].
|
||||
|
||||
But not [that] [].
|
||||
|
||||
Nor [that][].
|
||||
|
||||
Nor [that].
|
||||
|
||||
[Something in brackets like [this][] should work]
|
||||
|
||||
[Same with [this].]
|
||||
|
||||
In this case, [this](/somethingelse/) points to something else.
|
||||
|
||||
Backslashing should suppress \[this] and [this\].
|
||||
|
||||
[this]: #foo
|
||||
|
||||
|
||||
* * *
|
||||
|
||||
Here's one where the [link
|
||||
breaks] across lines.
|
||||
|
||||
Here's another where the [link
|
||||
breaks] across lines, but with a line-ending space.
|
||||
|
||||
|
||||
[link breaks]: /url/
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user