Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions .changeset/slimy-colts-drive.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
---
"@bbob/plugin-helper": patch
"@bbob/preset-html5": patch
"@bbob/preset-react": patch
"@bbob/preset-vue": patch
"@bbob/parser": patch
"@bbob/preset": patch
"@bbob/react": patch
"@bbob/types": patch
"@bbob/core": patch
"@bbob/html": patch
"@bbob/vue2": patch
"@bbob/vue3": patch
"@bbob/cli": patch
---

Fixes a problem with context-free tags.

Now code like `[code][codeButton]text[/codeButton][/code]`
will be parsed correctly as `<code>[codeButton]text[/codeButton]</code>`.
4 changes: 2 additions & 2 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,5 +40,5 @@ jobs:
- name: Run the coverage
run: pnpm run cover

- name: Coveralls
uses: coverallsapp/github-action@v2
# - name: Coveralls
# uses: coverallsapp/github-action@v2
86 changes: 36 additions & 50 deletions packages/bbob-parser/src/Token.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,49 +7,48 @@ import type { Token as TokenInterface } from "@bbob/types";

// type, value, line, row, start pos, end pos

const TOKEN_TYPE_ID = 't'; // 0;
const TOKEN_VALUE_ID = 'v'; // 1;
const TOKEN_COLUMN_ID = 'r'; // 2;
const TOKEN_LINE_ID = 'l'; // 3;
const TOKEN_START_POS_ID = 's'; // 4;
const TOKEN_END_POS_ID = 'e'; // 5;

const TOKEN_TYPE_WORD = 1; // 'word';
const TOKEN_TYPE_TAG = 2; // 'tag';
const TOKEN_TYPE_ATTR_NAME = 3; // 'attr-name';
const TOKEN_TYPE_ATTR_VALUE = 4; // 'attr-value';
const TOKEN_TYPE_SPACE = 5; // 'space';
const TOKEN_TYPE_NEW_LINE = 6; // 'new-line';
export const TYPE_ID = 't'; // 0;
export const VALUE_ID = 'v'; // 1;
export const LINE_ID = 'l'; // 3;
export const COLUMN_ID = 'r'; // 2;
export const START_POS_ID = 's'; // 4;
export const END_POS_ID = 'e'; // 5;
export const TYPE_WORD = 1; // 'word';
export const TYPE_TAG = 2; // 'tag';
export const TYPE_ATTR_NAME = 3; // 'attr-name';
export const TYPE_ATTR_VALUE = 4; // 'attr-value';
export const TYPE_SPACE = 5; // 'space';
export const TYPE_NEW_LINE = 6; // 'new-line';

const getTokenValue = (token: Token) => {
if (token && typeof token[TOKEN_VALUE_ID] !== 'undefined') {
return token[TOKEN_VALUE_ID];
if (token && typeof token[VALUE_ID] !== 'undefined') {
return token[VALUE_ID];
}

return '';
};

const getTokenLine = (token: Token) => (token && token[TOKEN_LINE_ID]) || 0;
const getTokenLine = (token: Token) => (token && token[LINE_ID]) || 0;

const getTokenColumn = (token: Token) => (token && token[TOKEN_COLUMN_ID]) || 0;
const getTokenColumn = (token: Token) => (token && token[COLUMN_ID]) || 0;

const getStartPosition = (token: Token) => (token && token[TOKEN_START_POS_ID]) || 0;
const getStartPosition = (token: Token) => (token && token[START_POS_ID]) || 0;

const getEndPosition = (token: Token) => (token && token[TOKEN_END_POS_ID]) || 0;
const getEndPosition = (token: Token) => (token && token[END_POS_ID]) || 0;

const isTextToken = (token: Token) => {
if (token && typeof token[TOKEN_TYPE_ID] !== 'undefined') {
return token[TOKEN_TYPE_ID] === TOKEN_TYPE_SPACE
|| token[TOKEN_TYPE_ID] === TOKEN_TYPE_NEW_LINE
|| token[TOKEN_TYPE_ID] === TOKEN_TYPE_WORD;
if (token && typeof token[TYPE_ID] !== 'undefined') {
return token[TYPE_ID] === TYPE_SPACE
|| token[TYPE_ID] === TYPE_NEW_LINE
|| token[TYPE_ID] === TYPE_WORD;
}

return false;
};

const isTagToken = (token: Token) => {
if (token && typeof token[TOKEN_TYPE_ID] !== 'undefined') {
return token[TOKEN_TYPE_ID] === TOKEN_TYPE_TAG;
if (token && typeof token[TYPE_ID] !== 'undefined') {
return token[TYPE_ID] === TYPE_TAG;
}

return false;
Expand All @@ -60,16 +59,16 @@ const isTagEnd = (token: Token) => getTokenValue(token).charCodeAt(0) === SLASH.
const isTagStart = (token: Token) => !isTagEnd(token);

const isAttrNameToken = (token: Token) => {
if (token && typeof token[TOKEN_TYPE_ID] !== 'undefined') {
return token[TOKEN_TYPE_ID] === TOKEN_TYPE_ATTR_NAME;
if (token && typeof token[TYPE_ID] !== 'undefined') {
return token[TYPE_ID] === TYPE_ATTR_NAME;
}

return false;
};

const isAttrValueToken = (token: Token) => {
if (token && typeof token[TOKEN_TYPE_ID] !== 'undefined') {
return token[TOKEN_TYPE_ID] === TOKEN_TYPE_ATTR_VALUE;
if (token && typeof token[TYPE_ID] !== 'undefined') {
return token[TYPE_ID] === TYPE_ATTR_VALUE;
}

return false;
Expand Down Expand Up @@ -103,20 +102,20 @@ class Token<TokenValue = string> implements TokenInterface {
readonly e: number; // end pos

constructor(type?: number, value?: TokenValue, row: number = 0, col: number = 0, start: number = 0, end: number = 0) {
this[TOKEN_LINE_ID] = row;
this[TOKEN_COLUMN_ID] = col;
this[TOKEN_TYPE_ID] = type || 0;
this[TOKEN_VALUE_ID] = String(value);
this[TOKEN_START_POS_ID] = start;
this[TOKEN_END_POS_ID] = end;
this[LINE_ID] = row;
this[COLUMN_ID] = col;
this[TYPE_ID] = type || 0;
this[VALUE_ID] = String(value);
this[START_POS_ID] = start;
this[END_POS_ID] = end;
}

get type() {
return this[TOKEN_TYPE_ID];
return this[TYPE_ID];
}

isEmpty() {
return this[TOKEN_TYPE_ID] === 0 || isNaN(this[TOKEN_TYPE_ID]);
return this[TYPE_ID] === 0 || isNaN(this[TYPE_ID]);
}

isText() {
Expand Down Expand Up @@ -172,18 +171,5 @@ class Token<TokenValue = string> implements TokenInterface {
}
}

export const TYPE_ID = TOKEN_TYPE_ID;
export const VALUE_ID = TOKEN_VALUE_ID;
export const LINE_ID = TOKEN_LINE_ID;
export const COLUMN_ID = TOKEN_COLUMN_ID;
export const START_POS_ID = TOKEN_START_POS_ID;
export const END_POS_ID = TOKEN_END_POS_ID;
export const TYPE_WORD = TOKEN_TYPE_WORD;
export const TYPE_TAG = TOKEN_TYPE_TAG;
export const TYPE_ATTR_NAME = TOKEN_TYPE_ATTR_NAME;
export const TYPE_ATTR_VALUE = TOKEN_TYPE_ATTR_VALUE;
export const TYPE_SPACE = TOKEN_TYPE_SPACE;
export const TYPE_NEW_LINE = TOKEN_TYPE_NEW_LINE;

export { Token };
export default Token;
34 changes: 20 additions & 14 deletions packages/bbob-parser/src/lexer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -74,15 +74,18 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
col++;
};

const checkContextFreeMode = (name: string, isClosingTag?: boolean) => {
const setupContextFreeTag = (name: string, isClosingTag?: boolean) => {
if (contextFreeTag !== '' && isClosingTag) {
contextFreeTag = '';
}

if (contextFreeTag === '' && contextFreeTags.includes(name.toLowerCase())) {
contextFreeTag = name;
const tagName = name.toLowerCase()

if (contextFreeTag === '' && isTokenNested(name) && contextFreeTags.includes(tagName)) {
contextFreeTag = tagName;
}
};
const toEndTag = (tagName: string) => `${openTag}${SLASH}${tagName}${closeTag}`

const chars = createCharGrabber(buffer, { onSkip });

Expand Down Expand Up @@ -178,12 +181,13 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
const name = tagChars.grabWhile(validName);

emitToken(TYPE_TAG, name, start, masterStartPos + tagChars.getLength() + 1);
checkContextFreeMode(name);

setupContextFreeTag(name);

tagChars.skip();
prevCol++;

// in cases when we has [url=someval]GET[/url] and we dont need to parse all
// in cases when we have [url=someval]GET[/url] and we don't need to parse all
if (isSingleValueTag) {
return TAG_STATE_VALUE;
}
Expand All @@ -202,7 +206,6 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
// detect case where we have '[My word [tag][/tag]' or we have '[My last line word'
const substr = chars.substrUntilChar(closeTag);


const hasInvalidChars = substr.length === 0 || substr.indexOf(openTag) >= 0;
const isNextCharReserved = nextChar && isCharReserved(nextChar)
const isLastChar = chars.isLast()
Expand All @@ -228,7 +231,8 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
chars.skip(); // skip closeTag

emitToken(TYPE_TAG, name, startPos, endPos);
checkContextFreeMode(name, isClosingTag);

setupContextFreeTag(name, isClosingTag);

return STATE_WORD;
}
Expand Down Expand Up @@ -277,12 +281,11 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo

if (chars.getCurr() === openTag) {
if (contextFreeTag) {
const fullTagLen = openTag.length + SLASH.length + contextFreeTag.length;
const fullTagName = `${openTag}${SLASH}${contextFreeTag}`;
const foundTag = chars.grabN(fullTagLen);
const isEndContextFreeMode = foundTag === fullTagName;
const fullTagName = toEndTag(contextFreeTag);
const foundTag = chars.grabN(fullTagName.length);
const isContextFreeEnded = foundTag.toLowerCase() === fullTagName.toLowerCase();

if (isEndContextFreeMode) {
if (isContextFreeEnded) {
return STATE_TAG;
}
} else if (chars.includes(closeTag)) {
Expand Down Expand Up @@ -357,12 +360,15 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
}

function isTokenNested(tokenValue: string) {
const value = openTag + SLASH + tokenValue;
const value = toEndTag(tokenValue);

if (nestedMap.has(value)) {
return !!nestedMap.get(value);
} else {
const status = caseFreeTags ? (buffer.toLowerCase().indexOf(value.toLowerCase()) > -1) : (buffer.indexOf(value) > -1);
const buf = caseFreeTags ? buffer.toLowerCase() : buffer;
const val = caseFreeTags ? value.toLowerCase() : value;

const status = buf.indexOf(val) > -1;

nestedMap.set(value, status);

Expand Down
Loading
Loading