feat: migrate to antlr4ng (#267)

* feat: replace antlr4ts with antlr4ng

* feat: switch on the caseInsensitive option

* feat: recompile all g4 files

* feat: update parser to fit antlr4ng

* test: update test to fit antlr4ng
This commit is contained in:
Hayden 2024-02-26 20:25:09 +08:00 committed by GitHub
parent 5ce89cb421
commit 195878da9b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
112 changed files with 648433 additions and 659067 deletions

View File

@ -17,13 +17,13 @@ module.exports = {
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
collectCoverage: true,
collectCoverage: false,
// An array of glob patterns indicating a set of files for which coverage information should be collected
// collectCoverageFrom: undefined,
// The directory where Jest should output its coverage files
coverageDirectory: 'coverage',
// coverageDirectory: 'coverage',
// An array of regexp pattern strings used to skip coverage collection
// coveragePathIgnorePatterns: [
@ -76,7 +76,7 @@ module.exports = {
// ],
// An array of file extensions your modules use
moduleFileExtensions: ['js', 'mjs', 'cjs', 'jsx', 'ts', 'tsx', 'json', 'node'],
moduleFileExtensions: ['js', 'mjs', 'ts'],
// A map from regular expressions to module names or to arrays of module names that allow to stub out resources with a single module
moduleNameMapper: {
@ -166,10 +166,11 @@ module.exports = {
// A map from regular expressions to paths to transformers
transform: {
'\\.[jt]sx?$': ['@swc/jest'],
'\\.mjs$': ['@swc/jest'],
},
// An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
extensionsToTreatAsEsm: ['.ts', '.tsx'],
extensionsToTreatAsEsm: ['.ts'],
// An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
// unmockedModulePathPatterns: undefined,
@ -181,4 +182,6 @@ module.exports = {
// Whether to use watchman for file crawling
// watchman: true
transformIgnorePatterns: ['dist/', '<rootDir>/node_modules/.pnpm/(?!(antlr4ng|antlr4-c3)@)'],
};

View File

@ -41,7 +41,7 @@
"@types/jest": "^29.5.1",
"@types/node": "^18.15.11",
"antlr-format-cli": "^1.2.1",
"antlr4ts-cli": "^0.5.0-alpha.4",
"antlr4ng-cli": "^1.0.7",
"chalk": "4.1.2",
"commitizen": "^4.3.0",
"glob": "^10.3.10",
@ -60,8 +60,8 @@
"registry": "https://registry.npmjs.org/"
},
"dependencies": {
"antlr4-c3": "3.1.1",
"antlr4ts": "0.5.0-alpha.4"
"antlr4-c3": "3.3.7",
"antlr4ng": "2.0.11"
},
"sideEffects": false
}

View File

@ -9,9 +9,9 @@ specifiers:
'@types/jest': ^29.5.1
'@types/node': ^18.15.11
antlr-format-cli: ^1.2.1
antlr4-c3: 3.1.1
antlr4ts: 0.5.0-alpha.4
antlr4ts-cli: ^0.5.0-alpha.4
antlr4-c3: 3.3.7
antlr4ng: 2.0.11
antlr4ng-cli: ^1.0.7
chalk: 4.1.2
commitizen: ^4.3.0
glob: ^10.3.10
@ -25,8 +25,8 @@ specifiers:
yargs-parser: ^21.1.1
dependencies:
antlr4-c3: 3.1.1
antlr4ts: 0.5.0-alpha.4
antlr4-c3: 3.3.7_antlr4ng-cli@1.0.7
antlr4ng: 2.0.11_antlr4ng-cli@1.0.7
devDependencies:
'@commitlint/cli': 17.7.2_@swc+core@1.3.60
@ -36,8 +36,8 @@ devDependencies:
'@swc/jest': 0.2.26_@swc+core@1.3.60
'@types/jest': 29.5.1
'@types/node': 18.16.16
antlr-format-cli: 1.2.1
antlr4ts-cli: 0.5.0-alpha.4
antlr-format-cli: 1.2.1_antlr4ng-cli@1.0.7
antlr4ng-cli: 1.0.7
chalk: 4.1.2
commitizen: 4.3.0_@swc+core@1.3.60
glob: 10.3.10
@ -1267,13 +1267,13 @@ packages:
engines: {node: '>=12'}
dev: true
/antlr-format-cli/1.2.1:
/antlr-format-cli/1.2.1_antlr4ng-cli@1.0.7:
resolution: {integrity: sha512-vqpoL9x3bXiNnC/vzZG3XOyk2vUAHPmBbI/ufyAqbxQHD27OPuUM4n/6m6NBEZZ7V4U2aEiefnZg2SCaSU89oA==}
hasBin: true
dependencies:
'@readme/better-ajv-errors': 1.6.0_ajv@8.12.0
ajv: 8.12.0
antlr4ng: 2.0.2
antlr4ng: 2.0.2_antlr4ng-cli@1.0.7
commander: 11.1.0
glob: 10.3.10
ts-json-schema-generator: 1.4.0
@ -1281,27 +1281,34 @@ packages:
- antlr4ng-cli
dev: true
/antlr4-c3/3.1.1:
resolution: {integrity: sha512-S7DixV12kxWexTkQYGvooCgHYU5AjF74oYio+ZNgm0XN3EzxDY3J6Si9GprQ4KksvgWwK//EgZnL/26WB+bOpw==}
/antlr4-c3/3.3.7_antlr4ng-cli@1.0.7:
resolution: {integrity: sha512-F3ndE38wwA6z6AjUbL3heSdEGl4TxulGDPf9xB0/IY4dbRHWBh6XNaqFwur8vHKQk9FS5yNABHeg2wqlqIYO0w==}
dependencies:
antlr4ts: 0.5.0-alpha.4
antlr4ng: 2.0.11_antlr4ng-cli@1.0.7
transitivePeerDependencies:
- antlr4ng-cli
dev: false
/antlr4ng/2.0.2:
/antlr4ng-cli/1.0.7:
resolution: {integrity: sha512-qN2FsDBmLvsQcA5CWTrPz8I8gNXeS1fgXBBhI78VyxBSBV/EJgqy8ks6IDTC9jyugpl40csCQ4sL5K4i2YZ/2w==}
hasBin: true
/antlr4ng/2.0.11_antlr4ng-cli@1.0.7:
resolution: {integrity: sha512-9jM91VVtHSqHkAHQsXHaoaiewFETMvUTI1/tXvwTiFw4f7zke3IGlwEyoKN9NS0FqIwDKFvUNW2e1cKPniTkVQ==}
peerDependencies:
antlr4ng-cli: 1.0.7
dependencies:
antlr4ng-cli: 1.0.7
dev: false
/antlr4ng/2.0.2_antlr4ng-cli@1.0.7:
resolution: {integrity: sha512-Fhs3AvhoGigRt3RpHw0wGA7n03j9BpskH9yCUViNB7NtKuCA+imy2orEZ8qcgPG98f7IryEPYlG9sx99f3ZOyw==}
peerDependencies:
antlr4ng-cli: 1.0.4
dependencies:
antlr4ng-cli: 1.0.7
dev: true
/antlr4ts-cli/0.5.0-alpha.4:
resolution: {integrity: sha512-lVPVBTA2CVHRYILSKilL6Jd4hAumhSZZWA7UbQNQrmaSSj7dPmmYaN4bOmZG79cOy0lS00i4LY68JZZjZMWVrw==}
hasBin: true
dev: true
/antlr4ts/0.5.0-alpha.4:
resolution: {integrity: sha512-WPQDt1B74OfPv/IMS2ekXAKkTZIHl88uMetg6q3OTqgFxZ/dxDXI0EWLyZid/1Pe6hTftyg5N7gel5wNAGxXyQ==}
dev: false
/anymatch/3.1.3:
resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
engines: {node: '>= 8'}

View File

@ -11,7 +11,7 @@ const outputPath = path.resolve(__dirname, '../src/lib');
const languageEntries = fs.readdirSync(grammarsPath);
const baseCmd = 'antlr4ts -visitor -listener -Xexact-output-dir -o';
const baseCmd = 'antlr4ng -Dlanguage=TypeScript -visitor -listener -Xexact-output-dir -o';
function compile(language) {
const cmd = `${baseCmd} ${outputPath}/${language} ${grammarsPath}/${language}/*.g4`;
@ -31,7 +31,7 @@ function compile(language) {
);
} else {
cleanComment(language);
console.log(chalk.greenBright(`Compile ${language} succeeded!`));
console.info(chalk.greenBright(`Compile ${language} succeeded!`));
}
});
}

View File

@ -53,15 +53,13 @@ function execStandardVersion(res) {
cmd += ` --tag-prefix ${tagPrefix} `;
cmd += ' --infile CHANGELOG.md ';
console.log(`Executing: ${cmd} \n`);
console.info(`Executing: ${cmd} \n`);
runCommand(cmd)
.then(({ message }) => {
console.log('Please checkout recent commit, and then');
console.log(
'Push branch and new tag to github, publish package to npm'
);
// message && console.log(message)
console.info('Please checkout recent commit, and then');
console.info('Push branch and new tag to github, publish package to npm');
// message && console.info(message)
})
.catch(({ error, code }) => {
code && console.error('Error: process exit code' + code);

View File

@ -5,6 +5,10 @@
lexer grammar FlinkSqlLexer;
options {
caseInsensitive= true;
}
// SKIP
SPACE : [ \t\r\n]+ -> channel(HIDDEN);
@ -585,9 +589,9 @@ ID_LITERAL : ID_LITERAL_FRAG;
fragment JAR_FILE_PARTTARN : '`' ( '\\' . | '``' | ~('`' | '\\'))* '`';
fragment EXPONENT_NUM_PART : 'E' [-+]? DEC_DIGIT+;
fragment ID_LITERAL_FRAG : [A-Z_0-9a-z]*? [A-Z_a-z]+? [A-Z_0-9a-z]*;
fragment ID_LITERAL_FRAG : [A-Z_0-9]*? [A-Z_]+? [A-Z_0-9]*;
fragment DEC_DIGIT : [0-9];
fragment DEC_LETTER : [A-Za-z];
fragment DEC_LETTER : [A-Z];
fragment DQUOTA_STRING : '"' ( '\\' . | '""' | ~('"' | '\\'))* '"';
fragment SQUOTA_STRING : '\'' ('\\' . | '\'\'' | ~('\'' | '\\'))* '\'';
fragment BIT_STRING_L : 'B' '\'' [01]+ '\'';

View File

@ -6,6 +6,7 @@ parser grammar FlinkSqlParser;
options {
tokenVocab=FlinkSqlLexer;
caseInsensitive= true;
}
program

View File

@ -27,8 +27,9 @@
lexer grammar HiveSqlLexer;
// unsupported option caseInsensitive in antlr4@4.9
// options { caseInsensitive = true; }
options {
caseInsensitive= true;
}
// Keywords
KW_ABORT : 'ABORT';
@ -502,7 +503,7 @@ Identifier: (Letter | Digit) (Letter | Digit | '_')* | QuotedIdentifier | '`' Re
fragment QuotedIdentifier: '`' ('``' | ~'`')* '`';
fragment Letter: 'A' ..'Z' | 'a' ..'z';
fragment Letter: 'A' ..'Z';
fragment HexDigit: 'A' ..'F';

View File

@ -29,6 +29,7 @@ parser grammar HiveSqlParser;
options
{
tokenVocab=HiveSqlLexer;
caseInsensitive= true;
}
program

View File

@ -27,7 +27,7 @@ THE SOFTWARE.
lexer grammar ImpalaSqlLexer;
options {
caseInsensitive=true;
caseInsensitive= true;
}
KW_ADD : 'ADD';

View File

@ -21,6 +21,7 @@ parser grammar ImpalaSqlParser;
options
{
tokenVocab=ImpalaSqlLexer;
caseInsensitive= true;
}
program
@ -873,7 +874,7 @@ booleanExpression
| left=booleanExpression operator=KW_OR right=booleanExpression # logicalBinary
;
predicate[ParserRuleContext value]
predicate[antlr.ParserRuleContext value]
: comparisonOperator right=valueExpression # comparison
| comparisonOperator comparisonQuantifier subQueryRelation # quantifiedComparison
| KW_NOT? KW_BETWEEN lower=valueExpression KW_AND upper=valueExpression # between

View File

@ -35,6 +35,7 @@ parser grammar MySqlParser;
options {
tokenVocab= MySqlLexer;
caseInsensitive= true;
}
// Top Level Description

View File

@ -36,6 +36,10 @@
lexer grammar PostgreSQLLexer;
options {
caseInsensitive= true;
}
/**
* Reference Doc: https://www.postgresql.org/docs/16.1/sql-commands.html
*/
@ -673,9 +677,9 @@ KW_BUFFER_USAGE_LIMIT : 'BUFFER_USAGE_LIMIT';
Identifier: IdentifierStartChar IdentifierChar*;
fragment IdentifierStartChar: // these are the valid identifier start characters below 0x7F
[a-zA-Z_]
[A-Z_]
| // these are the valid characters from 0x80 to 0xFF
[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF]
[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00F8-\u00FF]
| // these are the letters above 0xFF which only need a single UTF-16 code unit
[\u0100-\uD7FF\uE000-\uFFFF]
| // letters which require multiple UTF-16 code units
@ -771,7 +775,7 @@ InvalidUnterminatedBinaryStringConstant: 'B' UnterminatedStringConstant;
HexadecimalStringConstant: UnterminatedHexadecimalStringConstant '\'';
UnterminatedHexadecimalStringConstant: 'X' '\'' [0-9a-fA-F]*;
UnterminatedHexadecimalStringConstant: 'X' '\'' [0-9A-F]*;
InvalidHexadecimalStringConstant: InvalidUnterminatedHexadecimalStringConstant '\'';
@ -791,7 +795,7 @@ Numeric:
fragment Digits: [0-9]+;
PLSQLVARIABLENAME: ':' [a-zA-Z_] [a-zA-Z_0-9$]*;
PLSQLVARIABLENAME: ':' [A-Z_] [A-Z_0-9$]*;
PLSQLIDENTIFIER: ':"' ('\\' . | '""' | ~ ('"' | '\\'))* '"';
//
@ -861,13 +865,13 @@ fragment EscapeStringText: (
'\'\''
| '\\' (
// two-digit hex escapes are still valid when treated as single-digit escapes
'x' [0-9a-fA-F]
| 'u' [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]
| 'U' [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]
'x' [0-9A-F]
| 'u' [0-9A-F] [0-9A-F] [0-9A-F] [0-9A-F]
| 'U' [0-9A-F] [0-9A-F] [0-9A-F] [0-9A-F] [0-9A-F] [0-9A-F] [0-9A-F] [0-9A-F]
|
// Any character other than the Unicode escapes can follow a backslash. Some have
// special meaning, but that doesn't affect the syntax.
~ [xuU]
~ [xu]
)
| ~ ['\\]
)*;

View File

@ -41,6 +41,7 @@ parser grammar PostgreSQLParser;
options {
tokenVocab= PostgreSQLLexer;
caseInsensitive= true;
}
program

View File

@ -2366,7 +2366,7 @@ BIT_STRING_LIT: 'B' ('\'' [01]* '\'')+;
// Rule #284 <HEX_STRING_LIT> - subtoken typecast in <REGULAR_ID>
// Lowercase 'x' is a usual addition to the standard
HEX_STRING_LIT : 'X' ('\'' [A-Fa-f0-9]* '\'')+;
HEX_STRING_LIT : 'X' ('\'' [A-F0-9]* '\'')+;
DOUBLE_PERIOD : '..';
PERIOD : '.';
@ -2468,9 +2468,7 @@ REMARK_COMMENT:
PROMPT_MESSAGE: 'PRO' {this.IsNewlineAtPos(-4)}? 'MPT'? (' ' ~('\r' | '\n')*)? NEWLINE_EOF;
// TODO: should starts with newline
START_CMD
//: 'STA' 'RT'? SPACE ~('\r' | '\n')* NEWLINE_EOF
: // https://docs.oracle.com/cd/B19306_01/server.102/b14357/ch12002.htm
START_CMD: // https://docs.oracle.com/cd/B19306_01/server.102/b14357/ch12002.htm
'@' {this.IsNewlineAtPos(-2)}? '@'? ~('\r' | '\n')* NEWLINE_EOF; // https://docs.oracle.com/cd/B19306_01/server.102/b14357/ch12003.htm
REGULAR_ID: SIMPLE_LETTER (SIMPLE_LETTER | '$' | '_' | '#' | [0-9])*;
@ -2481,7 +2479,7 @@ SPACES: [ \t\r\n]+ -> channel(HIDDEN);
fragment NEWLINE_EOF : NEWLINE | EOF;
fragment QUESTION_MARK : '?';
fragment SIMPLE_LETTER : [a-zA-Z];
fragment SIMPLE_LETTER : [A-Z];
fragment FLOAT_FRAGMENT : UNSIGNED_INTEGER* '.'? UNSIGNED_INTEGER+;
fragment NEWLINE : '\r'? '\n';
fragment SPACE : [ \t];

View File

@ -32,6 +32,7 @@ parser grammar PlSqlParser;
options {
tokenVocab=PlSqlLexer;
superClass=PlSqlBaseParser;
caseInsensitive= true;
}
@parser::header {

View File

@ -25,6 +25,10 @@
lexer grammar SparkSqlLexer;
options {
caseInsensitive= true;
}
@members {
/**
* When true, parser should throw ParseException for unclosed bracketed comment.
@ -469,7 +473,7 @@ fragment EXPONENT: 'E' [+-]? DIGIT+;
fragment DIGIT: [0-9];
fragment LETTER: [A-Za-z];
fragment LETTER: [A-Z];
SIMPLE_COMMENT: '--' ('\\\n' | ~[\r\n])* '\r'? '\n'? -> channel(HIDDEN);

View File

@ -26,6 +26,7 @@ parser grammar SparkSqlParser;
options {
tokenVocab=SparkSqlLexer;
caseInsensitive= true;
}
program

View File

@ -23,6 +23,10 @@
grammar TrinoSql;
options {
caseInsensitive= true;
}
tokens {
DELIMITER
}
@ -419,7 +423,7 @@ booleanExpression
;
// workaround for https://github.com/antlr/antlr4/issues/780
predicate[ParserRuleContext value]
predicate[antlr.ParserRuleContext value]
: comparisonOperator right= valueExpression # comparison
| comparisonOperator comparisonQuantifier '(' query ')' # quantifiedComparison
| KW_NOT? KW_BETWEEN lower= valueExpression KW_AND upper= valueExpression # between
@ -1231,7 +1235,7 @@ fragment EXPONENT: 'E' [+-]? DIGIT+;
fragment DIGIT: [0-9];
fragment LETTER: [A-Za-z];
fragment LETTER: [A-Z];
SIMPLE_COMMENT: '--' ~[\r\n]* '\r'? '\n'? -> channel(HIDDEN);

View File

@ -1,4 +1,4 @@
export { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
export { AbstractParseTreeVisitor } from 'antlr4ng';
export {
MySQL,

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
import { Lexer } from "antlr4ts/Lexer";
import { Lexer } from "antlr4ng";
export default abstract class PlSqlBaseLexer extends Lexer {

View File

@ -1,6 +1,4 @@
import { Parser } from "antlr4ts/Parser";
import { TokenStream } from "antlr4ts/TokenStream";
import { Parser, TokenStream } from "antlr4ng";
export default abstract class PlSqlBaseParser extends Parser {
private _isVersion10: boolean = false;

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

View File

@ -4,10 +4,11 @@ import {
Token,
CharStreams,
CommonTokenStream,
CodePointCharStream,
CharStream,
ParserRuleContext,
} from 'antlr4ts';
import { ParseTreeWalker, ParseTreeListener } from 'antlr4ts/tree';
ParseTreeWalker,
ParseTreeListener,
} from 'antlr4ng';
import { CandidatesCollection, CodeCompletionCore } from 'antlr4-c3';
import { findCaretTokenIndex } from './utils/findCaretTokenIndex';
import {
@ -38,7 +39,7 @@ export default abstract class BasicParser<
P extends IParser<PRC> = IParser<PRC>,
> {
/** members for cache start */
protected _charStreams: CodePointCharStream;
protected _charStreams: CharStream;
protected _lexer: L;
protected _tokenStream: CommonTokenStream;
protected _parser: P;
@ -60,7 +61,7 @@ export default abstract class BasicParser<
* Create a antlr4 Lexer instance.
* @param input source string
*/
protected abstract createLexerFromCharStream(charStreams: CodePointCharStream): L;
protected abstract createLexerFromCharStream(charStreams: CharStream): L;
/**
* Create Parser by CommonTokenStream
@ -92,7 +93,7 @@ export default abstract class BasicParser<
* @param input string
*/
public createLexer(input: string, errorListener?: ErrorListener<any>) {
const charStreams = CharStreams.fromString(input.toUpperCase());
const charStreams = CharStreams.fromString(input);
const lexer = this.createLexerFromCharStream(charStreams);
if (errorListener) {
lexer.removeErrorListeners();
@ -126,7 +127,7 @@ export default abstract class BasicParser<
*/
public parse(input: string, errorListener?: ErrorListener<any>) {
const parser = this.createParser(input, errorListener);
parser.buildParseTree = true;
parser.buildParseTrees = true;
parser.errorHandler = new ErrorStrategy();
return parser.program();
@ -139,7 +140,7 @@ export default abstract class BasicParser<
*/
private createParserWithCache(input: string): P {
this._parseTree = null;
this._charStreams = CharStreams.fromString(input.toUpperCase());
this._charStreams = CharStreams.fromString(input);
this._lexer = this.createLexerFromCharStream(this._charStreams);
this._lexer.removeErrorListeners();
@ -154,7 +155,7 @@ export default abstract class BasicParser<
this._tokenStream.fill();
this._parser = this.createParserFromTokenStream(this._tokenStream);
this._parser.buildParseTree = true;
this._parser.buildParseTrees = true;
this._parser.errorHandler = new ErrorStrategy();
return this._parser;
@ -239,13 +240,13 @@ export default abstract class BasicParser<
const res = splitListener.statementsContext.map((context) => {
const { start, stop } = context;
return {
startIndex: start.startIndex,
endIndex: stop.stopIndex,
startIndex: start.start,
endIndex: stop.stop,
startLine: start.line,
endLine: stop.line,
startColumn: start.charPositionInLine + 1,
endColumn: stop.charPositionInLine + 1 + stop.text.length,
text: this._parsedInput.slice(start.startIndex, stop.stopIndex + 1),
startColumn: start.column + 1,
endColumn: stop.column + 1 + stop.text.length,
text: this._parsedInput.slice(start.start, stop.stop + 1),
};
});
@ -317,8 +318,8 @@ export default abstract class BasicParser<
}
// A boundary consisting of the index of the input.
const startIndex = startStatement?.start?.startIndex ?? 0;
const stopIndex = stopStatement?.stop?.stopIndex ?? input.length - 1;
const startIndex = startStatement?.start?.start ?? 0;
const stopIndex = stopStatement?.stop?.stop ?? input.length - 1;
/**
* Save offset of the tokenIndex in the range of input
@ -340,7 +341,7 @@ export default abstract class BasicParser<
const parser = this.createParserFromTokenStream(tokenStream);
parser.removeErrorListeners();
parser.buildParseTree = true;
parser.buildParseTrees = true;
parser.errorHandler = new ErrorStrategy();
sqlParserIns = parser;
@ -362,12 +363,12 @@ export default abstract class BasicParser<
(syntaxCtx) => {
const wordRanges: WordRange[] = syntaxCtx.wordRanges.map((token) => {
return {
text: this._parsedInput.slice(token.startIndex, token.stopIndex + 1),
startIndex: token.startIndex,
endIndex: token.stopIndex,
text: this._parsedInput.slice(token.start, token.stop + 1),
startIndex: token.start,
endIndex: token.stop,
line: token.line,
startColumn: token.charPositionInLine + 1,
stopColumn: token.charPositionInLine + 1 + token.text.length,
startColumn: token.column + 1,
stopColumn: token.column + 1 + token.text.length,
};
});
return {

View File

@ -1,10 +1,12 @@
import { DefaultErrorStrategy } from 'antlr4ts/DefaultErrorStrategy';
import { Parser } from 'antlr4ts/Parser';
import { InputMismatchException } from 'antlr4ts/InputMismatchException';
import { IntervalSet } from 'antlr4ts/misc/IntervalSet';
import { ParserRuleContext } from 'antlr4ts/ParserRuleContext';
import { RecognitionException } from 'antlr4ts/RecognitionException';
import { Token } from 'antlr4ts/Token';
import {
DefaultErrorStrategy,
Parser,
InputMismatchException,
IntervalSet,
ParserRuleContext,
RecognitionException,
Token,
} from 'antlr4ng';
/**
* Base on DefaultErrorStrategy.
@ -33,7 +35,7 @@ export class ErrorStrategy extends DefaultErrorStrategy {
if (!this.lastErrorStates) {
this.lastErrorStates = new IntervalSet();
}
this.lastErrorStates.add(recognizer.state);
this.lastErrorStates.addOne(recognizer.state);
let followSet: IntervalSet = this.getErrorRecoverySet(recognizer);
this.consumeUntil(recognizer, followSet);
}
@ -43,11 +45,7 @@ export class ErrorStrategy extends DefaultErrorStrategy {
if (this.nextTokensContext === undefined) {
e = new InputMismatchException(recognizer);
} else {
e = new InputMismatchException(
recognizer,
this.nextTokensState,
this.nextTokensContext
);
e = new InputMismatchException(recognizer);
}
// Mark the context as an anomaly

View File

@ -1,5 +1,10 @@
import { Token, Recognizer, ANTLRErrorListener, RecognitionException } from 'antlr4ts';
import { ATNSimulator } from 'antlr4ts/atn/ATNSimulator';
import {
Token,
Recognizer,
ANTLRErrorListener,
RecognitionException,
ATNSimulator,
} from 'antlr4ng';
/**
* Converted from {@link SyntaxError}.
@ -20,7 +25,7 @@ export interface ParseError {
* The type of error resulting from lexical parsing and parsing.
*/
export interface SyntaxError<T> {
readonly recognizer: Recognizer<T, ATNSimulator>;
readonly recognizer: Recognizer<ATNSimulator>;
readonly offendingSymbol: Token;
readonly line: number;
readonly charPositionInLine: number;
@ -34,15 +39,21 @@ export interface SyntaxError<T> {
*/
export type ErrorListener<T> = (parseError: ParseError, originalError: SyntaxError<T>) => void;
export default class ParseErrorListener implements ANTLRErrorListener<Token> {
export default class ParseErrorListener implements ANTLRErrorListener {
private _errorListener: ErrorListener<Token>;
constructor(errorListener: ErrorListener<Token>) {
this._errorListener = errorListener;
}
reportAmbiguity() {}
reportAttemptingFullContext() {}
reportContextSensitivity() {}
syntaxError(
recognizer: Recognizer<Token, ATNSimulator>,
recognizer: Recognizer<ATNSimulator>,
offendingSymbol,
line: number,
charPositionInLine: number,

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CaretPosition } from '../basic-parser-types';
/**
@ -15,15 +15,11 @@ export function findCaretTokenIndex(caretPosition: CaretPosition, allTokens: Tok
while (left <= right) {
const mid = left + ((right - left) >> 1);
const token = allTokens[mid];
if (
token.line > caretLine ||
(token.line === caretLine && token.charPositionInLine + 1 >= caretCol)
) {
if (token.line > caretLine || (token.line === caretLine && token.column + 1 >= caretCol)) {
right = mid - 1;
} else if (
token.line < caretLine ||
(token.line === caretLine &&
token.charPositionInLine + token.text.length + 1 < caretCol)
(token.line === caretLine && token.column + token.text.length + 1 < caretCol)
) {
left = mid + 1;
} else {

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { FlinkSqlLexer } from '../lib/flinksql/FlinkSqlLexer';
import {
@ -139,7 +139,10 @@ export class FlinkSqlSplitListener implements FlinkSqlParserListener {
this._statementsContext.push(ctx);
};
enterSingleStatement = (ctx: SingleStatementContext) => {};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
get statementsContext() {
return this._statementsContext;

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { HiveSqlLexer } from '../lib/hive/HiveSqlLexer';
import { HiveSqlParser, ProgramContext, StatementContext } from '../lib/hive/HiveSqlParser';
@ -130,7 +130,10 @@ export class HiveSqlSplitListener implements HiveSqlParserListener {
this._statementContext.push(ctx);
};
enterStatement = (ctx: StatementContext) => {};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
get statementsContext() {
return this._statementContext;

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { ImpalaSqlLexer } from '../lib/impala/ImpalaSqlLexer';
import {
@ -135,7 +135,10 @@ export class ImpalaSqlSplitListener implements ImpalaSqlParserListener {
this._statementContext.push(ctx);
};
enterSingleStatement = (ctx: SingleStatementContext) => {};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
get statementsContext() {
return this._statementContext;

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { MySqlLexer } from '../lib/mysql/MySqlLexer';
import { MySqlParser, ProgramContext, SingleStatementContext } from '../lib/mysql/MySqlParser';
@ -130,7 +130,10 @@ export class MysqlSplitListener implements MySqlParserListener {
this._statementsContext.push(ctx);
};
enterSingleStatement = (ctx: SingleStatementContext) => {};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
get statementsContext() {
return this._statementsContext;

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { PostgreSQLLexer } from '../lib/pgsql/PostgreSQLLexer';
import { PostgreSQLParser, ProgramContext, SingleStmtContext } from '../lib/pgsql/PostgreSQLParser';
@ -152,7 +152,10 @@ export class PgSqlSplitListener implements PostgreSQLParserListener {
this._statementsContext.push(ctx);
};
enterSingleStmt = (ctx: SingleStmtContext) => {};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
get statementsContext() {
return this._statementsContext;

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { PlSqlLexer } from '../lib/plsql/PlSqlLexer';
import { PlSqlParser, ProgramContext } from '../lib/plsql/PlSqlParser';

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { SparkSqlLexer } from '../lib/spark/SparkSqlLexer';
import {
@ -135,7 +135,10 @@ export class SparkSqlSplitListener implements SparkSqlParserListener {
this._statementsContext.push(ctx);
};
enterSingleStatement = (ctx: SingleStatementContext) => {};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
get statementsContext() {
return this._statementsContext;

View File

@ -1,4 +1,4 @@
import { Token } from 'antlr4ts';
import { Token } from 'antlr4ng';
import { CandidatesCollection } from 'antlr4-c3';
import { TrinoSqlLexer } from '../lib/trinosql/TrinoSqlLexer';
import {
@ -135,6 +135,11 @@ export class TrinoSqlSplitListener implements TrinoSqlListener {
this._statementsContext.push(ctx);
};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
get statementsContext() {
return this._statementsContext;
}

View File

@ -1,7 +1,7 @@
import FlinkSQL from 'src/parser/flinksql';
import { FlinkSqlLexer } from 'src/lib/flinksql/FlinkSqlLexer';
import { ErrorListener } from 'src/parser/common/parseErrorListener';
import { CommonTokenStream } from 'antlr4ts';
import { CommonTokenStream } from 'antlr4ng';
describe('BasicParser unit tests', () => {
const flinkParser = new FlinkSQL();

View File

@ -50,7 +50,7 @@ export function benchmark(name: string, fn: Function, times: number = 1): [numbe
const msg = `Benchmark: ${name} executed ${times} times. Total time: ${totalTime.toFixed(
2
)}ms. Average time: ${averageTime.toFixed(2)}ms`;
console.log(msg);
console.info(msg);
return [totalTime, averageTime, msg];
}

View File

@ -19,9 +19,9 @@ describe('FlinkSQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== statementCount - 2) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -36,9 +36,9 @@ describe('FlinkSQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== 0) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -52,9 +52,9 @@ describe('FlinkSQL ErrorStrategy test', () => {
splitListener.statementsContext.map((item, index) => {
if (index !== 0 && index !== 1) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});

View File

@ -1,4 +1,4 @@
import { ParseTreeListener } from 'antlr4ts/tree/ParseTreeListener';
import { ErrorNode, ParseTreeListener, ParserRuleContext, TerminalNode } from 'antlr4ng';
import FlinkSQL from 'src/parser/flinksql';
import { FlinkSqlParserListener } from 'src/lib/flinksql/FlinkSqlParserListener';
import { TableExpressionContext } from 'src/lib/flinksql/FlinkSqlParser';
@ -14,12 +14,16 @@ describe('Flink SQL Listener Tests', () => {
let result = '';
class MyListener implements FlinkSqlParserListener {
enterTableExpression = (ctx: TableExpressionContext): void => {
result = ctx.text.toLowerCase();
result = ctx.getText().toLowerCase();
};
visitTerminal(node: TerminalNode): void {}
visitErrorNode(node: ErrorNode): void {}
enterEveryRule(node: ParserRuleContext): void {}
exitEveryRule(node: ParserRuleContext): void {}
}
const listenTableName = new MyListener();
await parser.listen(listenTableName as ParseTreeListener, parseTree);
await parser.listen(listenTableName, parseTree);
expect(result).toBe(expectTableName);
});

View File

@ -1,6 +1,7 @@
import FlinkSQL from 'src/parser/flinksql';
import { FlinkSqlParserVisitor } from 'src/lib/flinksql/FlinkSqlParserVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ng';
import { TableExpressionContext } from 'src/lib/flinksql/FlinkSqlParser';
describe('Flink SQL Visitor Tests', () => {
const expectTableName = 'user1';
@ -8,7 +9,7 @@ describe('Flink SQL Visitor Tests', () => {
const parser = new FlinkSQL();
const parseTree = parser.parse(sql, (error) => {
console.log('Parse error:', error);
console.error('Parse error:', error);
});
test('Visitor visitTableName', () => {
@ -20,9 +21,9 @@ describe('Flink SQL Visitor Tests', () => {
protected defaultResult() {
return result;
}
visitTableExpression = (ctx): void => {
result = ctx.text.toLowerCase();
};
visitTableExpression(ctx: TableExpressionContext) {
result = ctx.getText().toLowerCase();
}
}
const visitor: any = new MyVisitor();
visitor.visit(parseTree);

View File

@ -19,9 +19,9 @@ describe('HiveSQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== statementCount - 2) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -36,9 +36,9 @@ describe('HiveSQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== 0) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -52,9 +52,9 @@ describe('HiveSQL ErrorStrategy test', () => {
splitListener.statementsContext.map((item, index) => {
if (index !== 0 && index !== 1) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});

View File

@ -1,6 +1,6 @@
import { ParseTreeListener } from 'antlr4ts/tree/ParseTreeListener';
import { ParseTreeListener } from 'antlr4ng';
import HiveSQL from 'src/parser/hive';
import { ProgramContext } from 'src/lib/hive/HiveSqlParser';
import { ProgramContext, SelectItemContext } from 'src/lib/hive/HiveSqlParser';
import { HiveSqlParserListener } from 'src/lib/hive/HiveSqlParserListener';
describe('HiveSQL Listener Tests', () => {
@ -12,14 +12,18 @@ describe('HiveSQL Listener Tests', () => {
let result = '';
class MyListener implements HiveSqlParserListener {
enterSelectItem(ctx) {
result = ctx.text;
enterSelectItem(ctx: SelectItemContext) {
result = ctx.getText();
}
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
}
const listenTableName = new MyListener();
await parser.listen(listenTableName as ParseTreeListener, parseTree as ProgramContext);
expect(result).toBe(expectTableName.toUpperCase());
expect(result).toBe(expectTableName);
});
test('Listener enterCreateTable', async () => {
const sql = `drop table table_name;`;
@ -27,13 +31,18 @@ describe('HiveSQL Listener Tests', () => {
let result = '';
class MyListener implements HiveSqlParserListener {
enterDropTableStatement(ctx) {
result = ctx.text;
result = ctx.getText();
}
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
}
const listenTableName = new MyListener();
await parser.listen(listenTableName as ParseTreeListener, parseTree as ProgramContext);
expect(result).toBe('DROPTABLETABLE_NAME');
expect(result).toBe('droptabletable_name');
});
test('Split sql listener', async () => {

View File

@ -1,8 +1,8 @@
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ng';
import HiveSQL from 'src/parser/hive';
import { HiveSqlParserVisitor } from 'src/lib/hive/HiveSqlParserVisitor';
import { ProgramContext } from 'src/lib/hive/HiveSqlParser';
import { ProgramContext, TableNameContext } from 'src/lib/hive/HiveSqlParser';
describe('HiveSQL Visitor Tests', () => {
const expectTableName = 'dm_gis.dlv_addr_tc_count';
@ -10,7 +10,7 @@ describe('HiveSQL Visitor Tests', () => {
const parser = new HiveSQL();
const parseTree = parser.parse(sql, (error) => {
console.log('Parse error:', error);
console.error('Parse error:', error);
});
test('Visitor visitTableName', () => {
@ -20,8 +20,8 @@ describe('HiveSQL Visitor Tests', () => {
return result;
}
visitTableName(ctx) {
result = ctx.text.toLowerCase();
visitTableName(ctx: TableNameContext) {
result = ctx.getText().toLowerCase();
}
}

View File

@ -19,9 +19,9 @@ describe('ImpalaSQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== statementCount - 2) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -36,9 +36,9 @@ describe('ImpalaSQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== 0) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -52,9 +52,9 @@ describe('ImpalaSQL ErrorStrategy test', () => {
splitListener.statementsContext.map((item, index) => {
if (index !== 0 && index !== 1) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});

View File

@ -1,6 +1,6 @@
import ImpalaSQL from 'src/parser/impala';
import { ImpalaSqlParserListener } from 'src/lib/impala/ImpalaSqlParserListener';
import { ParseTreeListener } from 'antlr4ts/tree/ParseTreeListener';
import { ParseTreeListener } from 'antlr4ng';
describe('impala SQL Listener Tests', () => {
const expectTableName = 'user1';
@ -13,8 +13,13 @@ describe('impala SQL Listener Tests', () => {
let result = '';
class MyListener implements ImpalaSqlParserListener {
enterTableNamePath = (ctx): void => {
result = ctx.text.toLowerCase();
result = ctx.getText().toLowerCase();
};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
}
const listenTableName = new MyListener();

View File

@ -1,5 +1,5 @@
import ImpalaSQL from 'src/parser/impala';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ng';
import { ImpalaSqlParserVisitor } from 'src/lib/impala/ImpalaSqlParserVisitor';
describe('impala SQL Visitor Tests', () => {
@ -8,7 +8,7 @@ describe('impala SQL Visitor Tests', () => {
const parser = new ImpalaSQL();
const parseTree = parser.parse(sql, (error) => {
console.log('Parse error:', error);
console.error('Parse error:', error);
});
test('Visitor visitTableNamePath', () => {
@ -21,7 +21,7 @@ describe('impala SQL Visitor Tests', () => {
return result;
}
visitTableNamePath = (ctx): void => {
result = ctx.text.toLowerCase();
result = ctx.getText().toLowerCase();
};
}
const visitor: any = new MyVisitor();

View File

@ -19,9 +19,9 @@ describe('MySQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== statementCount - 2) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -36,9 +36,9 @@ describe('MySQL ErrorStrategy test', () => {
const statementCount = splitListener.statementsContext.length;
splitListener.statementsContext.map((item, index) => {
if (index !== statementCount - 1 && index !== 0) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});
@ -52,9 +52,9 @@ describe('MySQL ErrorStrategy test', () => {
splitListener.statementsContext.map((item, index) => {
if (index !== 0 && index !== 1) {
expect(item.exception).not.toBe(undefined);
expect(item.exception).not.toBe(null);
} else {
expect(item.exception).toBe(undefined);
expect(item.exception).toBe(null);
}
});
});

View File

@ -1,6 +1,6 @@
import MySQL from 'src/parser/mysql';
import { MySqlParserListener } from 'src/lib/mysql/MySqlParserListener';
import { ParseTreeListener } from 'antlr4ts/tree/ParseTreeListener';
import { ParseTreeListener } from 'antlr4ng';
describe('MySQL Listener Tests', () => {
const expectTableName = 'user1';
@ -13,8 +13,12 @@ describe('MySQL Listener Tests', () => {
let result = '';
class MyListener implements MySqlParserListener {
enterTableName = (ctx): void => {
result = ctx.text.toLowerCase();
result = ctx.getText().toLowerCase();
};
visitTerminal() {}
visitErrorNode() {}
enterEveryRule() {}
exitEveryRule() {}
}
const listenTableName: any = new MyListener();

View File

@ -42,7 +42,7 @@ describe('MySQL Database Administration Syntax Tests', () => {
it(sql, () => {
const result = parser.validate(sql);
if (result.length) {
console.log(result, `\nPlease check sql: ${sql}`);
console.error(result, `\nPlease check sql: ${sql}`);
}
expect(result.length).toBe(0);
});

View File

@ -49,7 +49,7 @@ describe('MySQL DDL Syntax Tests', () => {
it(sql, () => {
const result = parser.validate(sql);
if (result.length) {
console.log(result, `\nPlease check sql: ${sql}`);
console.error(result, `\nPlease check sql: ${sql}`);
}
expect(result.length).toBe(0);
});

View File

@ -33,7 +33,7 @@ describe('MySQL DML Syntax Tests', () => {
it(sql, () => {
const result = parser.validate(sql);
if (result.length) {
console.log(result, `\nPlease check sql: ${sql}`);
console.error(result, `\nPlease check sql: ${sql}`);
}
expect(result.length).toBe(0);
});

View File

@ -27,7 +27,7 @@ describe('MySQL Transactional and Locking, Replication, Prepared Compound and Ut
it(sql, () => {
const result = parser.validate(sql);
if (result.length) {
console.log(result, `\nPlease check sql: ${sql}`);
console.error(result, `\nPlease check sql: ${sql}`);
}
expect(result.length).toBe(0);
});

View File

@ -1,6 +1,6 @@
import MySQL from 'src/parser/mysql';
import { MySqlParserVisitor } from 'src/lib/mysql/MySqlParserVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ng';
describe('MySQL Visitor Tests', () => {
const expectTableName = 'user1';
@ -8,7 +8,7 @@ describe('MySQL Visitor Tests', () => {
const parser = new MySQL();
const parseTree = parser.parse(sql, (error) => {
console.log('Parse error:', error);
console.error('Parse error:', error);
});
test('Visitor visitTableName', () => {
@ -19,7 +19,7 @@ describe('MySQL Visitor Tests', () => {
}
visitTableName = (ctx): void => {
result = ctx.text.toLowerCase();
result = ctx.getText().toLowerCase();
};
}
const visitor = new MyVisitor();

Some files were not shown because too many files have changed in this diff Show More