feat: upgrade antlr4 to 4.12.0 (#88)

Ziv 2023-05-04 10:13:05 +08:00 committed by GitHub
parent c0842b3e07
commit c1c72def30
116 changed files with 552721 additions and 609942 deletions


@ -9,7 +9,7 @@ jobs:
strategy:
matrix:
node-version: [12.x, 14.x]
node-version: [16.x]
steps:
- uses: actions/checkout@v1
@ -17,10 +17,11 @@ jobs:
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
- name: npm install, test, and build
- name: install, test, build
run: |
npm install
npm test
npm run build
export NODE_OPTIONS="--max_old_space_size=4096"
yarn install
yarn test
yarn build
env:
CI: true
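(The `--max_old_space_size=4096` bump is presumably needed because the generated TypeScript parsers are enormous; this commit adds files such as a roughly 193,000-line PlSqlParser.ts, and compiling and testing them can exhaust Node's default heap.)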


@ -1,10 +1,33 @@
# dt-sql-parser
## Get Start
## Prerequisites
Install the dependencies after cloning the project:
```bash
yarn install
```
- test
```bash
yarn test
```
## Compile the grammar sources
Compile one language:
```bash
yarn antlr4 --lang=generic
```
Compile all languages:
```bash
yarn antlr4 --all
```
## Branch Organization
## Source Code Organization

Binary file not shown.

Binary file not shown.


@ -1,7 +1,8 @@
const path = require('path');
const exec = require('child_process').exec;
const argv = require('yargs-parser')(process.argv.slice(2));
const antlr4 = path.resolve(__dirname, 'antlr-4.8-complete.jar');
const antlr4 = path.resolve(__dirname, './antlr-4.12.0-complete.jar');
const grammars = path.resolve(__dirname, '../src/grammar');
const output = path.resolve(__dirname, '../src/lib');
@ -14,16 +15,16 @@ const entry = [
'flinksql',
];
entry.forEach((language) => {
function compile(language) {
const cmd = `
java -jar ${antlr4}
-Dlanguage=JavaScript
-Dlanguage=TypeScript
-visitor
-listener
-o ${output}/${language}
${grammars}/${language}/*.g4
`.replace(/\n/g, '');
console.log('cmd:', cmd);
console.info('Executing:', cmd);
exec(cmd, (err) => {
if (err) {
console.error('Antlr4 build error: ' + language, err);
@ -31,5 +32,20 @@ entry.forEach((language) => {
console.log(`Build ${language} succeeded.`);
}
});
});
}
if (argv.all) { // build all: yarn antlr4 --all
entry.forEach((language) => {
compile(language);
});
} else if (argv.lang) { // build single: yarn antlr4 --lang=generic
const supportedLanguage = entry.find((language) => language === argv.lang);
if (supportedLanguage) {
compile(argv.lang);
} else {
console.error('Unsupported language: ' + argv.lang);
}
} else {
console.error('Please specify a language, e.g.: yarn antlr4 --lang=flinksql');
}
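For reference, this is roughly the single-line command that `compile('flinksql')` assembles once `.replace(/\n/g, '')` has flattened the template literal; `<root>` stands in for the absolute paths produced by `path.resolve` (an illustrative sketch, not output from the script):

```typescript
// Hypothetical flattened command; the real paths are resolved above.
const flattened =
    'java -jar <root>/antlr-4.12.0-complete.jar' +
    ' -Dlanguage=TypeScript -visitor -listener' +
    ' -o <root>/src/lib/flinksql' +
    ' <root>/src/grammar/flinksql/*.g4';
```

The `-visitor` and `-listener` flags make ANTLR emit the `*Visitor` / `*Listener` base classes that the updated tests extend.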


@ -1,33 +1,38 @@
// For a detailed explanation regarding each configuration property, visit:
// https://jestjs.io/docs/en/configuration.html
/*
* For a detailed explanation regarding each configuration property, visit:
* https://jestjs.io/docs/configuration
*/
module.exports = {
// All imported modules in your tests should be mocked automatically
// automock: false,
// Stop running tests after `n` failures
// bail: 0,
// Respect "browser" field in package.json when resolving modules
// browser: false,
// The directory where Jest should store its cached dependency information
// cacheDirectory: "/private/var/folders/xr/54w2mws93hj3p3_ysc347flc0000gn/T/jest_dx",
// cacheDirectory: "/private/var/folders/70/21p94l8j6cd9vv9t990g8cj00000gn/T/jest_dx",
// Automatically clear mock calls and instances between every test
// clearMocks: false,
// Automatically clear mock calls, instances, contexts and results before every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
// collectCoverage: false,
collectCoverage: true,
// An array of glob patterns indicating a set of files for which coverage information should be collected
// collectCoverageFrom: null,
// collectCoverageFrom: undefined,
// The directory where Jest should output its coverage files
// coverageDirectory: null,
coverageDirectory: "coverage",
// An array of regexp pattern strings used to skip coverage collection
coveragePathIgnorePatterns: ['/node_modules/'],
// coveragePathIgnorePatterns: [
// "/node_modules/"
// ],
// Indicates which provider should be used to instrument code for coverage
// coverageProvider: "babel",
// A list of reporter names that Jest uses when writing coverage reports
// coverageReporters: [
@ -38,27 +43,33 @@ module.exports = {
// ],
// An object that configures minimum threshold enforcement for coverage results
// coverageThreshold: null,
// coverageThreshold: undefined,
// A path to a custom dependency extractor
// dependencyExtractor: null,
// dependencyExtractor: undefined,
// Make calling deprecated APIs throw helpful error messages
// errorOnDeprecated: false,
// The default configuration for fake timers
// fakeTimers: {
// "enableGlobally": false
// },
// Force coverage collection from ignored files using an array of glob patterns
// forceCoverageMatch: [],
// A path to a module which exports an async function that is triggered once before all test suites
// globalSetup: null,
// globalSetup: undefined,
// A path to a module which exports an async function that is triggered once after all test suites
// globalTeardown: null,
// globalTeardown: undefined,
// A set of global variables that need to be available in all test environments
globals: {
window: {},
},
// globals: {},
// The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers.
// maxWorkers: "50%",
// An array of directory names to be searched recursively up from the requiring module's location
// moduleDirectories: [
@ -66,16 +77,18 @@ module.exports = {
// ],
// An array of file extensions your modules use
// moduleFileExtensions: [
// "js",
// "json",
// "jsx",
// "ts",
// "tsx",
// "node"
// ],
moduleFileExtensions: [
"js",
"mjs",
"cjs",
"jsx",
"ts",
"tsx",
"json",
"node"
],
// A map from regular expressions to module names that allow to stub out resources with a single module
// A map from regular expressions to module names or to arrays of module names that allow to stub out resources with a single module
// moduleNameMapper: {},
// An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader
@ -88,28 +101,28 @@ module.exports = {
// notifyMode: "failure-change",
// A preset that is used as a base for Jest's configuration
// preset: null,
preset: "ts-jest/presets/js-with-ts-esm",
// Run tests from one or more projects
// projects: null,
// projects: undefined,
// Use this configuration option to add custom reporters to Jest
// reporters: undefined,
// Automatically reset mock state between every test
// Automatically reset mock state before every test
// resetMocks: false,
// Reset the module registry before running each individual test
// resetModules: false,
// A path to a custom resolver
// resolver: null,
// resolver: undefined,
// Automatically restore mock state between every test
// Automatically restore mock state and implementation before every test
// restoreMocks: false,
// The root directory that Jest should scan for tests and modules within
// rootDir: null,
// rootDir: undefined,
// A list of paths to directories that Jest should use to search for files in
// roots: [
@ -125,11 +138,14 @@ module.exports = {
// A list of paths to modules that run some code to configure or set up the testing framework before each test
// setupFilesAfterEnv: [],
// The number of seconds after which a test is considered as slow and reported as such in the results.
// slowTestThreshold: 5,
// A list of paths to snapshot serializer modules Jest should use for snapshot testing
// snapshotSerializers: [],
// The test environment that will be used for testing
testEnvironment: 'node',
testEnvironment: "node",
// Options that will be passed to the testEnvironment
// testEnvironmentOptions: {},
@ -138,46 +154,47 @@ module.exports = {
// testLocationInResults: false,
// The glob patterns Jest uses to detect test files
// testMatch: [
// "**/__tests__/**/*.[jt]s?(x)",
// "**/?(*.)+(spec|test).[tj]s?(x)"
// ],
testMatch: [
"**/__tests__/**/*.[jt]s?(x)",
"**/?(*.)+(spec|test).[tj]s?(x)"
],
// An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
testPathIgnorePatterns: ['/node_modules/'],
// testPathIgnorePatterns: [
// "/node_modules/"
// ],
// The regexp pattern or array of patterns that Jest uses to detect test files
// testRegex: [],
// This option allows the use of a custom results processor
// testResultsProcessor: null,
// testResultsProcessor: undefined,
// This option allows use of a custom test runner
// testRunner: "jasmine2",
// This option sets the URL for the jsdom environment. It is reflected in properties such as location.href
// testURL: "http://localhost",
// Setting this value to "fake" allows the use of fake timers for functions such as "setTimeout"
// timers: "real",
// testRunner: "jest-circus/runner",
// A map from regular expressions to paths to transformers
transform: {
'^.+\\.(t|j)sx?$': 'ts-jest',
'\\.[jt]sx?$': 'ts-jest',
},
// An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
transformIgnorePatterns: ['/node_modules/'],
transformIgnorePatterns: [
'/node_modules/(?!antlr4)'
],
// An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
// unmockedModulePathPatterns: undefined,
// Indicates whether each individual test should be reported during the run
// verbose: null,
// verbose: undefined,
// An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode
// watchPathIgnorePatterns: [],
// Whether to use watchman for file crawling
// watchman: true,
moduleNameMapper: {
'^antlr4$': '<rootDir>/node_modules/antlr4/src/antlr4/index.web.js',
}
};
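Pulled together, these are the two settings doing the heavy lifting for the upgrade. antlr4 4.12 ships as ES modules, which Jest cannot load untransformed, so the package must be exempted from `transformIgnorePatterns` and its bare specifier pinned to one concrete entry point (a consolidated sketch of the excerpt above, not the full config):

```typescript
// Sketch: the antlr4-specific pieces of the Jest config above.
module.exports = {
    // Do NOT skip antlr4 during transformation; ts-jest compiles its
    // ES modules down to something the test runner can load.
    transformIgnorePatterns: ['/node_modules/(?!antlr4)'],
    // Resolve the bare 'antlr4' import to the package's web entry point.
    moduleNameMapper: {
        '^antlr4$': '<rootDir>/node_modules/antlr4/src/antlr4/index.web.js',
    },
};
```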


@ -19,21 +19,23 @@
"build": "rm -rf dist && tsc",
"eslint": "eslint ./src/**/*.ts",
"check-types": "tsc --skipLibCheck",
"test": "jest",
"test": "NODE_OPTIONS=--max_old_space_size=4096 && jest",
"release": "npm run build && standard-version --infile CHANGELOG.md"
},
"author": "dt-insight-front",
"license": "MIT",
"devDependencies": {
"@types/jest": "^24.0.13",
"@types/jest": "^29.5.1",
"@types/node": "^18.15.11",
"@typescript-eslint/eslint-plugin": "^3.10.1",
"@typescript-eslint/parser": "^3.10.1",
"eslint": "^7.7.0",
"eslint": "^7.32.0",
"eslint-config-google": "^0.14.0",
"jest": "^24.8.0",
"ts-jest": "^24.1.0",
"typescript": "^4.9.4",
"standard-version": "^9.1.0"
"jest": "^29.5.0",
"standard-version": "^9.5.0",
"ts-jest": "^29.1.0",
"typescript": "^5.0.4",
"yargs-parser": "^21.1.1"
},
"git repository": "https://github.com/DTStack/dt-sql-parser",
"repository": "https://github.com/DTStack/dt-sql-parser",
@ -41,7 +43,6 @@
"registry": "https://registry.npmjs.org/"
},
"dependencies": {
"@types/antlr4": "4.7.0",
"antlr4": "4.7.2"
"antlr4": "^4.12.0"
}
}

12 file diffs suppressed because they are too large or have lines that are too long.

src/lib/generic/SqlLexer.ts (new file, 6618 lines)

3 file diffs suppressed because they are too large or have lines that are too long.

src/lib/generic/SqlParser.ts (new file, 78974 lines)

7 file diffs suppressed because they are too large or have lines that are too long.

src/lib/hive/HiveSql.ts (new file, 33635 lines)

3 file diffs suppressed because they are too large or have lines that are too long.

src/lib/hive/HiveSqlLexer.ts (new file, 2108 lines)

15 file diffs suppressed because they are too large or have lines that are too long.


@ -1,101 +0,0 @@
// https://github.com/antlr/grammars-v4/blob/master/sql/postgresql/Java/PostgreSQLLexerBase.java
// eslint-disable-next-line no-invalid-this
const __extends = (this && this.__extends) || (function() {
let extendStatics = function(d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function(d, b) {
d.__proto__ = b;
}) ||
function(d, b) {
for (const p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p];
};
return extendStatics(d, b);
};
return function(d, b) {
if (typeof b !== 'function' && b !== null) {
throw new TypeError('Class extends value ' + String(b) + ' is not a constructor or null');
}
extendStatics(d, b);
function __() {
this.constructor = d;
}
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
const Lexer = require('antlr4').Lexer;
function isLetter(str) {
return str.length === 1 && str.match(/[a-z]/i);
}
function PostgreSQLLexerBase(input) {
const _this = Lexer.call(this, input) || this;
_this.tags = [];
return _this;
}
__extends(PostgreSQLLexerBase, Lexer);
PostgreSQLLexerBase.prototype.pushTag = function() {
this.tags.push(getText());
};
PostgreSQLLexerBase.prototype.isTag = function() {
return this.getText().equals(this.tags.peek());
};
PostgreSQLLexerBase.prototype.popTag = function() {
this.tags.pop();
};
PostgreSQLLexerBase.prototype.getInputStream = function() {
return this._input;
};
PostgreSQLLexerBase.prototype.checkLA = function(c) {
// eslint-disable-next-line new-cap
return this.getInputStream().LA(1) !== c;
};
PostgreSQLLexerBase.prototype.charIsLetter = function() {
// eslint-disable-next-line new-cap
return isLetter(this.getInputStream().LA(-1));
};
PostgreSQLLexerBase.prototype.HandleNumericFail = function() {
this.getInputStream().seek(this.getInputStream().index() - 2);
const Integral = 535;
this.setType(Integral);
};
PostgreSQLLexerBase.prototype.HandleLessLessGreaterGreater = function() {
const LESS_LESS = 18;
const GREATER_GREATER = 19;
if (this.getText() === '<<') {
this.setType(LESS_LESS);
}
if (this.getText() === '>>') {
this.setType(GREATER_GREATER);
}
};
PostgreSQLLexerBase.prototype.UnterminatedBlockCommentDebugAssert = function() {
// Debug.Assert(InputStream.LA(1) == -1 /*EOF*/);
};
PostgreSQLLexerBase.prototype.CheckIfUtf32Letter = function() {
// eslint-disable-next-line new-cap
let codePoint = this.getInputStream().LA(-2) << 8 + this.getInputStream().LA(-1);
let c;
if (codePoint < 0x10000) {
c = String.fromCharCode(codePoint);
} else {
codePoint -= 0x10000;
c = String.fromCharCode(codePoint / 0x400 + 0xd800, codePoint % 0x400 + 0xdc00);
}
return isLetter(c[0]);
};
exports.PostgreSQLLexerBase = PostgreSQLLexerBase;


@ -0,0 +1,72 @@
import { Lexer } from 'antlr4';
function isLetter(str) {
return str.length === 1 && str.match(/[a-z]/i);
}
export default class PostgreSQLLexerBase extends Lexer {
tags: string[] = [];
CheckIfUtf32Letter() {
// eslint-disable-next-line new-cap
// Parenthesized: in JS, '+' binds tighter than '<<', so the intended
// (high byte << 8) + low byte grouping must be explicit.
let codePoint = (this.getInputStream().LA(-2) << 8) + this.getInputStream().LA(-1);
let c;
if (codePoint < 0x10000) {
c = String.fromCharCode(codePoint);
} else {
codePoint -= 0x10000;
c = String.fromCharCode(codePoint / 0x400 + 0xd800, codePoint % 0x400 + 0xdc00);
}
return isLetter(c[0]);
}
UnterminatedBlockCommentDebugAssert() {
// Debug.Assert(InputStream.LA(1) == -1 /*EOF*/);
}
HandleLessLessGreaterGreater() {
const LESS_LESS = 18;
const GREATER_GREATER = 19;
if (this.text === '<<') {
this._type = LESS_LESS;
}
if (this.text === '>>') {
this._type = GREATER_GREATER;
}
}
HandleNumericFail() {
this.getInputStream().seek(this.getInputStream().index - 2);
const Integral = 535;
this._type = Integral;
}
charIsLetter() {
// eslint-disable-next-line new-cap
return isLetter(this.getInputStream().LA(-1));
}
pushTag() {
this.tags.push(this.text);
}
isTag() {
// Compare against the top of the stack without consuming it; popTag()
// is what removes the tag (mirrors the Java peek()/pop() pair).
return this.text === this.tags[this.tags.length - 1];
}
popTag() {
this.tags.pop();
}
getInputStream() {
return this._input;
}
checkLA(c) {
// eslint-disable-next-line new-cap
return this.getInputStream().LA(1) !== c;
}
}
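For context, the `tags` stack exists to match PostgreSQL dollar-quoted strings. A hedged sketch of how lexer actions in the grammar would drive it, with rule names modeled on the upstream grammars-v4 PostgreSQL grammar rather than taken from this diff:

```typescript
// $tag$ ... $tag$  -- the opening delimiter records its tag; the closing
// delimiter is accepted only when its text matches the recorded tag:
//
//   BeginDollarStringConstant : '$' Tag? '$' { this.pushTag(); } ;
//   EndDollarStringConstant   : '$' Tag? '$' { this.isTag() }? { this.popTag(); } ;
//
// isTag() compares against the top of the stack without consuming it;
// popTag() discards the tag once the whole constant has been matched.
```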


@ -1,149 +0,0 @@
/* eslint-disable new-cap,camelcase */
// https://github.com/antlr/grammars-v4/blob/master/sql/postgresql/Java/PostgreSQLParserBase.java
// eslint-disable-next-line no-invalid-this
const __extends = (this && this.__extends) || (function() {
let extendStatics = function(d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function(d, b) {
d.__proto__ = b;
}) ||
function(d, b) {
for (const p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p];
};
return extendStatics(d, b);
};
return function(d, b) {
if (typeof b !== 'function' && b !== null) {
throw new TypeError('Class extends value ' + String(b) + ' is not a constructor or null');
}
extendStatics(d, b);
function __() {
this.constructor = d;
}
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
const PostgreSQLLexer_1 = require('../PostgreSQLLexer');
const PostgreSQLParser_1 = require('../PostgreSQLParser');
const antlr4 = require('antlr4/index');
const CharStreams = antlr4.CharStreams;
const CommonTokenStream = antlr4.CommonTokenStream;
const Parser = antlr4.Parser;
__extends(PostgreSQLParserBase, Parser);
function PostgreSQLParserBase(input) {
return Parser.call(this, input) || this;
}
PostgreSQLParserBase.prototype.GetParsedSqlTree = function(script, line) {
const ph = this.getPostgreSQLParser(script);
return ph.program();
};
PostgreSQLParserBase.prototype.ParseRoutineBody = function(_localctx) {
let lang = null;
for (let _i = 0, _a = _localctx.createfunc_opt_item(); _i < _a.length; _i++) {
const coi = _a[_i];
if (!!coi.LANGUAGE()) {
if (!!coi.nonreservedword_or_sconst()) {
if (!!coi.nonreservedword_or_sconst().nonreservedword()) {
if (!!coi.nonreservedword_or_sconst().nonreservedword().identifier()) {
if (!!coi.nonreservedword_or_sconst().nonreservedword().identifier().Identifier()) {
lang = coi.nonreservedword_or_sconst().nonreservedword().identifier().Identifier().getText();
break;
}
}
}
}
}
}
if (!lang) {
return;
}
// eslint-disable-next-line camelcase
let func_as = null;
for (let _b = 0, _c = _localctx.createfunc_opt_item(); _b < _c.length; _b++) {
const a = _c[_b];
if (!a.func_as()) {
// eslint-disable-next-line camelcase
func_as = a;
break;
}
}
// eslint-disable-next-line camelcase
if (!!func_as) {
const txt = this.GetRoutineBodyString(func_as.func_as().sconst(0));
const line = func_as.func_as().sconst(0).start.getLine();
const ph = this.getPostgreSQLParser(txt);
switch (lang) {
case 'plpgsql':
func_as.func_as().Definition = ph.plsqlroot();
break;
case 'sql':
func_as.func_as().Definition = ph.program();
break;
}
}
};
PostgreSQLParserBase.prototype.TrimQuotes = function(s) {
return (!s) ? s : s.substring(1, s.length() - 1);
};
PostgreSQLParserBase.prototype.unquote = function(s) {
const slength = s.length();
const r = '';
let i = 0;
while (i < slength) {
const c = s.charAt(i);
r.append(c);
if (c === '\'' && i < slength - 1 && (s.charAt(i + 1) === '\'')) {
i++;
}
i++;
}
return r.toString();
};
PostgreSQLParserBase.prototype.GetRoutineBodyString = function(rule) {
const anysconst = rule.anysconst();
const StringConstant = anysconst.StringConstant();
if (!!StringConstant) {
return this.unquote(this.TrimQuotes(StringConstant.getText()));
}
const UnicodeEscapeStringConstant = anysconst.UnicodeEscapeStringConstant();
if (!!UnicodeEscapeStringConstant) {
return this.TrimQuotes(UnicodeEscapeStringConstant.getText());
}
const EscapeStringConstant = anysconst.EscapeStringConstant();
if (!!EscapeStringConstant) {
return this.TrimQuotes(EscapeStringConstant.getText());
}
let result = '';
const dollartext = anysconst.DollarText();
for (let _i = 0, dollartext_1 = dollartext; _i < dollartext_1.length; _i++) {
const s = dollartext_1[_i];
result += s.getText();
}
return result;
};
PostgreSQLParserBase.getPostgreSQLParser = function(script) {
const charStream = CharStreams.fromString(script);
const lexer = new PostgreSQLLexer_1.PostgreSQLLexer(charStream);
const tokens = new CommonTokenStream(lexer);
const parser = new PostgreSQLParser_1.PostgreSQLParser(tokens);
lexer.removeErrorListeners();
parser.removeErrorListeners();
// LexerDispatchingErrorListener listener_lexer = new LexerDispatchingErrorListener((Lexer)(((CommonTokenStream)(this.getInputStream())).getTokenSource()));
// ParserDispatchingErrorListener listener_parser = new ParserDispatchingErrorListener(this);
// lexer.addErrorListener(listener_lexer);
// parser.addErrorListener(listener_parser);
return parser;
};
exports.PostgreSQLParserBase = PostgreSQLParserBase;


@ -0,0 +1,110 @@
/* eslint-disable new-cap,camelcase */
import { Parser, CharStreams, CommonTokenStream } from 'antlr4';
import PostgreSQLLexer from '../PostgreSQLLexer';
import PostgreSQLParser from '../PostgreSQLParser';
export default class PostgreSQLParserBase extends Parser {
getPostgreSQLParser(script) {
const charStream = CharStreams.fromString(script);
const lexer = new PostgreSQLLexer(charStream);
const tokens = new CommonTokenStream(lexer);
const parser = new PostgreSQLParser(tokens);
return parser;
}
GetParsedSqlTree(script, line) {
const ph = this.getPostgreSQLParser(script);
return ph.program();
}
ParseRoutineBody(_localctx) {
let lang = null;
for (let _i = 0, _a = _localctx.createfunc_opt_item(); _i < _a.length; _i++) {
const coi = _a[_i];
if (!!coi.LANGUAGE()) {
if (!!coi.nonreservedword_or_sconst()) {
if (!!coi.nonreservedword_or_sconst().nonreservedword()) {
if (!!coi.nonreservedword_or_sconst().nonreservedword().identifier()) {
if (!!coi.nonreservedword_or_sconst().nonreservedword().identifier().Identifier()) {
lang = coi.nonreservedword_or_sconst().nonreservedword().identifier().Identifier().getText();
break;
}
}
}
}
}
}
if (!lang) {
return;
}
// eslint-disable-next-line camelcase
let func_as = null;
for (let _b = 0, _c = _localctx.createfunc_opt_item(); _b < _c.length; _b++) {
const a = _c[_b];
if (a.func_as()) { // the first option item that actually has a func_as clause
// eslint-disable-next-line camelcase
func_as = a;
break;
}
}
// eslint-disable-next-line camelcase
if (!!func_as) {
const txt = this.GetRoutineBodyString(func_as.func_as().sconst(0));
// @ts-ignore
const line = func_as.func_as().sconst(0).start.getLine();
const ph = this.getPostgreSQLParser(txt);
switch (lang) {
case 'plpgsql':
func_as.func_as().Definition = ph.plsqlroot();
break;
case 'sql':
func_as.func_as().Definition = ph.program();
break;
}
}
}
TrimQuotes(s: string) {
return (!s) ? s : s.substring(1, s.length - 1);
}
unquote(s: string) {
const slength = s.length;
let r = '';
let i = 0;
while (i < slength) {
const c = s.charAt(i);
r = r.concat(c);
if (c === '\'' && i < slength - 1 && (s.charAt(i + 1) === '\'')) {
i++;
}
i++;
}
return r;
}
GetRoutineBodyString(rule) {
const anysconst = rule.anysconst();
const StringConstant = anysconst.StringConstant();
if (!!StringConstant) {
return this.unquote(this.TrimQuotes(StringConstant.getText()));
}
const UnicodeEscapeStringConstant = anysconst.UnicodeEscapeStringConstant();
if (!!UnicodeEscapeStringConstant) {
return this.TrimQuotes(UnicodeEscapeStringConstant.getText());
}
const EscapeStringConstant = anysconst.EscapeStringConstant();
if (!!EscapeStringConstant) {
return this.TrimQuotes(EscapeStringConstant.getText());
}
let result = '';
const dollartext = anysconst.DollarText();
for (let _i = 0, dollartext_1 = dollartext; _i < dollartext_1.length; _i++) {
const s = dollartext_1[_i];
result += s.getText();
}
return result;
}
}
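A minimal usage sketch of the helper above: `getPostgreSQLParser` re-wires `CharStream -> Lexer -> CommonTokenStream -> Parser` so that routine bodies embedded in `CREATE FUNCTION` statements can be parsed with a fresh parser instance (the import path and `base` instance are hypothetical):

```typescript
import PostgreSQLParserBase from './PostgreSQLParserBase'; // hypothetical path

declare const base: PostgreSQLParserBase; // some concrete parser instance
const sub = base.getPostgreSQLParser('SELECT 1;');
const tree = sub.program(); // parse the embedded script on its own
```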


@ -1,16 +0,0 @@
const Lexer = require('antlr4').Lexer;
function PlSqlBaseLexer(...args) {
Lexer.call(this, ...args);
return this;
}
PlSqlBaseLexer.prototype = Object.create(Lexer.prototype);
PlSqlBaseLexer.prototype.constructor = PlSqlBaseLexer;
PlSqlBaseLexer.prototype.IsNewlineAtPos = function(pos) {
const la = this._input.LA(pos);
return la == -1 || la == '\n';
};
exports.PlSqlBaseLexer = PlSqlBaseLexer;


@ -1,27 +0,0 @@
const Parser = require('antlr4').Parser;
function PlSqlBaseParser(...args) {
Parser.call(this, ...args);
this._isVersion10 = false;
this._isVersion12 = true;
return this;
}
PlSqlBaseParser.prototype = Object.create(Parser.prototype);
PlSqlBaseParser.prototype.constructor = PlSqlBaseParser;
PlSqlBaseParser.prototype.isVersion10 = function() {
return this._isVersion10;
};
PlSqlBaseParser.prototype.isVersion12 = function() {
return this._isVersion12;
};
PlSqlBaseParser.prototype.setVersion10 = function(value) {
this._isVersion10 = value;
};
PlSqlBaseParser.prototype.setVersion12 = function(value) {
this._isVersion12 = value;
};
exports.PlSqlBaseParser = PlSqlBaseParser;

2 file diffs suppressed because one or more lines are too long.

src/lib/plsql/PlSqlLexer.ts (new file, 16230 lines)

3 file diffs suppressed because they are too large or have lines that are too long.

src/lib/plsql/PlSqlParser.ts (new file, 193036 lines)

5 file diffs suppressed because they are too large or have lines that are too long.


@ -0,0 +1,9 @@
import { Lexer } from "antlr4";
export default class PlSqlBaseLexer extends Lexer {
IsNewlineAtPos(pos: number): boolean {
const la = this._input.LA(pos);
// LA() yields a code point, or -1 at EOF; the original base lexer also
// treated an actual newline as a match, not just end-of-input.
return la === -1 || String.fromCharCode(la) === '\n';
}
}


@ -0,0 +1,20 @@
import { Parser } from 'antlr4';
export default class PlSqlBaseParser extends Parser {
private _isVersion10: boolean = false;
private _isVersion12: boolean = true;
public isVersion10(): boolean {
return this._isVersion10;
}
public isVersion12(): boolean {
return this._isVersion12;
}
public setVersion10(value: boolean): void {
this._isVersion10 = value;
}
public setVersion12(value: boolean): void {
this._isVersion12 = value;
}
}
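The version flags are meant to gate version-specific alternatives in the grammar through semantic predicates. A sketch of the intended use; the rule fragment is illustrative, following the upstream PlSql grammar's `{isVersion12()}?` style:

```typescript
// In the .g4 grammar, version-specific alternatives are guarded like:
//   rule : ... {this.isVersion12()}? new_12c_syntax ... ;
// Callers can opt in to legacy behavior before parsing:
declare const parser: PlSqlBaseParser; // a generated parser extending the base
parser.setVersion12(false);
parser.setVersion10(true);
```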

10 file diffs suppressed because they are too large or have lines that are too long.


@ -0,0 +1,25 @@
import { Lexer } from "antlr4";
export default class SparkSqlBaseLexer extends Lexer {
isValidDecimal() {
const nextChar = this.fromCodePoint(this._input.LA(1));
return !(nextChar >= 'A' && nextChar <= 'Z' || nextChar >= '0' && nextChar <= '9' || nextChar == '_');
}
/**
* This method will be called when we see '/*' and try to match it as a bracketed comment.
* If the next character is '+', it should be parsed as hint later, and we cannot match
* it as a bracketed comment.
*
* Returns true if the next character is '+'.
*/
isHint() {
const nextChar = this.fromCodePoint(this._input.LA(1));
return nextChar == '+';
}
fromCodePoint(codePoint) {
return String.fromCodePoint(codePoint);
}
}
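To make the doc comment concrete: after the lexer has consumed `/*`, `this._input.LA(1)` returns the code point of the next character, so both predicates just peek one character ahead (a worked example, not code from the diff):

```typescript
// Input: "/*+ BROADCAST(t) */ SELECT ..."
//   after '/*' is matched, LA(1) is '+'  -> isHint() === true,
//   so the grammar can lex a hint instead of a bracketed comment.
// Input: "/* plain comment */ SELECT ..."
//   after '/*' is matched, LA(1) is ' '  -> isHint() === false.
```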


@ -1,21 +1,27 @@
import { Token, Lexer } from 'antlr4';
import { ParseTreeWalker } from 'antlr4/tree';
import { ParseTreeWalker, CommonTokenStream, Token } from 'antlr4';
import type { Parser } from 'antlr4/src/antlr4';
import ParserErrorListener, {
ParserError,
ErrorHandler,
ParserErrorCollector,
} from './parserErrorListener';
interface IParser {
// Lost in type definition
ruleNames: string[];
// Customized in our parser
program(): any;
}
/**
* Custom Parser class; subclasses need to extend it.
*/
export default abstract class BasicParser<C = any> {
private _parser;
export default abstract class BasicParser {
private _parser: IParser & Parser;
public parse(
input: string,
errorListener?: ErrorHandler,
errorListener?: ErrorHandler<any>,
) {
const parser = this.createParser(input);
this._parser = parser;
@ -46,33 +52,31 @@ export default abstract class BasicParser<C = any> {
* Create an antlr4 Lexer object
* @param input source string
*/
public abstract createLexer(input: string): Lexer;
public abstract createLexer(input: string);
/**
* Create a Parser from a Lexer
* @param lexer Lexer
*/
public abstract createParserFromLexer(lexer: Lexer);
public abstract createParserFromLexer(lexer);
/**
* Visit parser tree
* @param parserTree
*/
// public abstract visit(visitor: any, parserTree: any);
/**
* The source string
* Get all Tokens of input string
* @param input string
* @returns Token[]
*/
public getAllTokens(input: string): Token[] {
return this.createLexer(input).getAllTokens();
public getAllTokens(input: string): Token[] {
const lexer = this.createLexer(input);
const tokensStream = new CommonTokenStream(lexer);
tokensStream.fill();
return tokensStream.tokens;
};
/**
* Get Parser instance by input string
* @param input
*/
public createParser(input: string) {
public createParser(input: string): IParser & Parser {
const lexer = this.createLexer(input);
const parser: any = this.createParserFromLexer(lexer);
parser.buildParseTrees = true;
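A usage sketch of a concrete parser built on `BasicParser`, tying together the methods in this diff (`validate` is exercised by the test files below; the SQL strings are arbitrary):

```typescript
import FlinkSQL from '../src/parser/flinksql';

const parser = new FlinkSQL();
const errors = parser.validate('SELECT id FROM tb1;'); // [] when the SQL parses
const tokens = parser.getAllTokens('SELECT id FROM tb1;');
// CommonTokenStream.fill() appends a trailing EOF token, which is why the
// updated lexer tests assert on `tokens.length - 1`.
console.log(errors.length, tokens.length - 1);
```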


@ -1,6 +1,4 @@
import { Token, Recognizer } from 'antlr4';
import { ErrorListener } from 'antlr4/error';
import { Token, Recognizer, ErrorListener, RecognitionException } from 'antlr4';
export interface ParserError {
startLine: number;
endLine: number;
@ -9,8 +7,8 @@ export interface ParserError {
message: string;
}
export interface SyntaxError {
recognizer: Recognizer;
export interface SyntaxError<T> {
recognizer: Recognizer<T>;
offendingSymbol: Token;
line: number;
charPositionInLine: number;
@ -18,9 +16,13 @@ export interface SyntaxError {
e: any;
}
export type ErrorHandler = (err: ParserError, errOption: SyntaxError) => void;
type ErrorOffendingSymbol = {
text: string;
};
export class ParserErrorCollector extends ErrorListener {
export type ErrorHandler<T> = (err: ParserError, errOption: SyntaxError<T>) => void;
export class ParserErrorCollector extends ErrorListener<ErrorOffendingSymbol> {
private _errors: ParserError[];
constructor(error: ParserError[]) {
@ -29,8 +31,8 @@ export class ParserErrorCollector extends ErrorListener {
}
syntaxError(
recognizer: Recognizer, offendingSymbol: Token, line: number,
charPositionInLine: number, msg: string, e: any,
recognizer: Recognizer<ErrorOffendingSymbol>, offendingSymbol: ErrorOffendingSymbol, line: number,
charPositionInLine: number, msg: string, e: RecognitionException,
) {
let endCol = charPositionInLine + 1;
if (offendingSymbol && offendingSymbol.text !== null) {
@ -47,16 +49,16 @@ export class ParserErrorCollector extends ErrorListener {
}
export default class ParserErrorListener extends ErrorListener {
export default class ParserErrorListener extends ErrorListener<ErrorOffendingSymbol> {
private _errorHandler;
constructor(errorListener: ErrorHandler) {
constructor(errorListener: ErrorHandler<ErrorOffendingSymbol>) {
super();
this._errorHandler = errorListener;
}
syntaxError(
recognizer: Recognizer, offendingSymbol: Token, line: number,
recognizer: Recognizer<ErrorOffendingSymbol>, offendingSymbol: ErrorOffendingSymbol, line: number,
charPositionInLine: number, msg: string, e: any,
) {
let endCol = charPositionInLine + 1;
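A sketch of consuming these errors through the `ErrorHandler` callback that `parse()` accepts (see `basicParser.ts` above; only fields shown in the `ParserError` interface are used):

```typescript
import FlinkSQL from '../src/parser/flinksql';

const parser = new FlinkSQL();
parser.parse('SELECT FROM;', (err, errOption) => {
    // err is a ParserError; errOption carries the raw SyntaxError details.
    console.log(`line ${err.startLine}: ${err.message}`);
});
```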


@ -1,17 +1,17 @@
import { InputStream, CommonTokenStream, Lexer } from 'antlr4';
import { FlinkSqlLexer } from '../lib/flinksql/FlinkSqlLexer';
import { FlinkSqlParser } from '../lib/flinksql/FlinkSqlParser';
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import FlinkSqlLexer from '../lib/flinksql/FlinkSqlLexer';
import FlinkSqlParser from '../lib/flinksql/FlinkSqlParser';
import BasicParser from './common/basicParser';
export default class FlinkSQL extends BasicParser {
public createLexer(input: string): Lexer {
const chars = new InputStream(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const lexer = <unknown> new FlinkSqlLexer(chars) as Lexer;
public createLexer(input: string): FlinkSqlLexer {
const chars = new CharStream(input.toUpperCase()); // Some lexers only recognize uppercase tokens, so uppercase the input
const lexer = new FlinkSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {
const tokenStream = new CommonTokenStream(lexer);
return new FlinkSqlParser(tokenStream);
public createParserFromLexer(lexer: Lexer): FlinkSqlParser {
const tokens = new CommonTokenStream(lexer);
const parser = new FlinkSqlParser(tokens);
return parser;
}
}


@ -1,15 +1,15 @@
import { InputStream, CommonTokenStream, Lexer } from 'antlr4';
import { SqlLexer } from '../lib/generic/SqlLexer';
import { SqlParser } from '../lib/generic/SqlParser';
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import SqlLexer from '../lib/generic/SqlLexer';
import SqlParser from '../lib/generic/SqlParser';
import BasicParser from './common/basicParser';
export default class GenericSQL extends BasicParser {
public createLexer(input: string): Lexer {
const chars = new InputStream(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const lexer = <unknown> new SqlLexer(chars) as Lexer;
public createLexer(input: string): SqlLexer {
const chars = new CharStream(input.toUpperCase()); // Some lexers only recognize uppercase tokens, so uppercase the input
const lexer = new SqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {
public createParserFromLexer(lexer: Lexer): SqlParser {
const tokenStream = new CommonTokenStream(lexer);
return new SqlParser(tokenStream);
}


@ -1,15 +1,15 @@
import { InputStream, CommonTokenStream, Lexer } from 'antlr4';
import { HiveSqlLexer } from '../lib/hive/HiveSqlLexer';
import { HiveSql } from '../lib/hive/HiveSql';
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import HiveSqlLexer from '../lib/hive/HiveSqlLexer';
import HiveSql from '../lib/hive/HiveSql';
import BasicParser from './common/basicParser';
export default class HiveSQL extends BasicParser {
public createLexer(input: string): Lexer {
const chars = new InputStream(input);
const lexer = <unknown> new HiveSqlLexer(chars) as Lexer;
public createLexer(input: string): HiveSqlLexer {
const chars = new CharStream(input);
const lexer = new HiveSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {
public createParserFromLexer(lexer: Lexer): HiveSql {
const tokenStream = new CommonTokenStream(lexer);
return new HiveSql(tokenStream);
}


@ -1,13 +1,13 @@
import { InputStream, CommonTokenStream, Lexer } from 'antlr4';
import { PostgreSQLLexer } from '../lib/pgsql/PostgreSQLLexer';
import { PostgreSQLParser } from '../lib/pgsql/PostgreSQLParser';
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import BasicParser from './common/basicParser';
import PostgreSQLLexer from '../lib/pgsql/PostgreSQLLexer';
import PostgreSQLParser from '../lib/pgsql/PostgreSQLParser';
export default class PostgresSQL extends BasicParser {
public createLexer(input: string): Lexer {
const chars = new InputStream(input.toUpperCase());
const lexer = <unknown> new PostgreSQLLexer(chars) as Lexer;
public createLexer(input: string): PostgreSQLLexer {
const chars = new CharStream(input.toUpperCase());
const lexer = new PostgreSQLLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {


@ -1,16 +1,16 @@
import { InputStream, CommonTokenStream, Lexer } from 'antlr4';
import { PlSqlLexer } from '../lib/plsql/PlSqlLexer';
import { PlSqlParser } from '../lib/plsql/PlSqlParser';
import { CharStream, CommonTokenStream } from 'antlr4';
import BasicParser from './common/basicParser';
import PlSqlLexer from '../lib/plsql/PlSqlLexer';
import PlSqlParser from '../lib/plsql/PlSqlParser';
export default class PLSQLParser extends BasicParser {
public createLexer(input: string): Lexer {
const chars = new InputStream(input.toUpperCase());
const lexer = <unknown> new PlSqlLexer(chars) as Lexer;
public createLexer(input: string): PlSqlLexer {
const chars = new CharStream(input.toUpperCase());
const lexer = new PlSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {
public createParserFromLexer(lexer: PlSqlLexer): PlSqlParser {
const tokenStream = new CommonTokenStream(lexer);
return new PlSqlParser(tokenStream);
}


@ -1,12 +1,12 @@
import { InputStream, CommonTokenStream, Lexer } from 'antlr4';
import { SparkSqlLexer } from '../lib/spark/SparkSqlLexer';
import { SparkSqlParser } from '../lib/spark/SparkSqlParser';
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import BasicParser from './common/basicParser';
import SparkSqlLexer from '../lib/spark/SparkSqlLexer';
import SparkSqlParser from '../lib/spark/SparkSqlParser';
export default class SparkSQL extends BasicParser {
public createLexer(input: string): Lexer {
const chars = new InputStream(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const lexer = <unknown> new SparkSqlLexer(chars) as Lexer;
const chars = new CharStream(input.toUpperCase()); // Some lexers only recognize uppercase tokens, so uppercase the input
const lexer = new SparkSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {


@ -1,4 +1,4 @@
import { FlinkSQL } from '../../../src';
import FlinkSQL from '../../../src/parser/flinksql';
describe('FlinkSQL Lexer tests', () => {
const parser = new FlinkSQL();
@ -7,6 +7,6 @@ describe('FlinkSQL Lexer tests', () => {
const tokens = parser.getAllTokens(sql);
test('token counts', () => {
expect(tokens.length).toBe(7);
expect(tokens.length - 1).toBe(7);
});
});


@ -1,4 +1,6 @@
import { FlinkSQL, FlinkSqlParserListener } from '../../../src';
import FlinkSQL from '../../../src/parser/flinksql';
import FlinkSqlParserListener from '../../../src/lib/flinksql/FlinkSqlParserListener';
import { TableExpressionContext } from '../../../src/lib/flinksql/FlinkSqlParser';
describe('Flink SQL Listener Tests', () => {
const expectTableName = 'user1';
@ -10,11 +12,16 @@ describe('Flink SQL Listener Tests', () => {
test('Listener enterTableName', async () => {
let result = '';
class MyListener extends FlinkSqlParserListener {
enterTableExpression(ctx): void {
constructor() {
super()
}
enterTableExpression = (ctx: TableExpressionContext): void => {
result = ctx.getText().toLowerCase();
}
}
const listenTableName: any = new MyListener();
const listenTableName = new MyListener();
await parser.listen(listenTableName, parserTree);
expect(result).toBe(expectTableName);
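(Note the switch from method overrides to arrow-function properties such as `enterTableExpression = (ctx) => { ... }`: the listener and visitor base classes generated by antlr4 4.12 declare these callbacks as optional instance properties, which is presumably also why the visitor tests below call `super.visitTableName?.(ctx)` with optional chaining.)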


@ -1,4 +1,4 @@
import { FlinkSQL } from '../../../src';
import FlinkSQL from '../../../src/parser/flinksql';
describe('FlinkSQL Syntax Tests', () => {
const parser = new FlinkSQL();
@ -22,6 +22,11 @@ describe('FlinkSQL Syntax Tests', () => {
const result = parser.validate(sql);
expect(result.length).toBe(0);
});
test('Test simple Error Select Statement', () => {
const sql = `SELECTproduct, amount FROM;`;
const result = parser.validate(sql);
expect(result.length).toBe(1);
});
test('Test Select Statement with where clause', () => {
const sql = `SELECT * FROM person WHERE id = 200 OR id = 300;`;
const result = parser.validate(sql);
@ -217,7 +222,6 @@ describe('FlinkSQL Syntax Tests', () => {
test('Test valid Double Line Comment statement', () => {
const sql = `----test comment\n`;
const result = parser.validate(sql);
console.log('res:', result);
expect(result.length).toBe(0);
});


@ -1,4 +1,4 @@
import { FlinkSQL } from '../../../../src';
import FlinkSQL from "../../../../src/parser/flinksql";
describe('FlinkSQL Create Table Syntax Tests', () => {
const parser = new FlinkSQL();


@ -1,4 +1,4 @@
import { FlinkSQL } from '../../../../src';
import FlinkSQL from "../../../../src/parser/flinksql";
describe('FlinkSQL Create Table Syntax Tests', () => {
const parser = new FlinkSQL();


@ -1,4 +1,4 @@
import { FlinkSQL } from '../../../../src';
import FlinkSQL from '../../../../src/parser/flinksql';
describe('FlinkSQL Create Table Syntax Tests', () => {
const parser = new FlinkSQL();


@ -1,4 +1,4 @@
import { FlinkSQL } from '../../../../src';
import FlinkSQL from "../../../../src/parser/flinksql";
describe('FlinkSQL Create Table Syntax Tests', () => {
const parser = new FlinkSQL();


@ -1,4 +1,4 @@
import { FlinkSQL } from '../../../../src';
import FlinkSQL from "../../../../src/parser/flinksql";
describe('FlinkSQL Create Table Syntax Tests', () => {
const parser = new FlinkSQL();


@ -1,4 +1,5 @@
import { FlinkSQL, FlinkSqlParserVisitor } from '../../../src';
import FlinkSQL from '../../../src/parser/flinksql';
import FlinkSqlParserVisitor from '../../../src/lib/flinksql/FlinkSqlParserVisitor';
describe('Flink SQL Visitor Tests', () => {
const expectTableName = 'user1';
@ -9,14 +10,11 @@ describe('Flink SQL Visitor Tests', () => {
console.log('Parse error:', error);
});
// console.log('Parser tree string:', parser.toString(parserTree));
test('Visitor visitTableName', () => {
let result = '';
class MyVisitor extends FlinkSqlParserVisitor {
visitTableExpression(ctx): void {
class MyVisitor extends FlinkSqlParserVisitor<any>{
visitTableExpression = (ctx): void => {
result = ctx.getText().toLowerCase();
super.visitTableExpression(ctx);
}
}
const visitor: any = new MyVisitor();


@ -1,4 +1,4 @@
import { GenericSQL } from '../../../src/';
import GenericSQL from '../../../src/parser/generic';
describe('GenericSQL Lexer tests', () => {
const mysqlParser = new GenericSQL();
@ -7,6 +7,6 @@ describe('GenericSQL Lexer tests', () => {
const tokens = mysqlParser.getAllTokens(sql);
test('token counts', () => {
expect(tokens.length).toBe(12);
expect(tokens.length - 1).toBe(12);
});
});


@ -1,4 +1,5 @@
import { GenericSQL, SqlParserListener } from '../../../src';
import GenericSQL from '../../../src/parser/generic';
import SqlParserListener from '../../../src/lib/generic/SqlParserListener';
describe('Generic SQL Listener Tests', () => {
const expectTableName = 'user1';
@ -10,7 +11,7 @@ describe('Generic SQL Listener Tests', () => {
test('Listener enterTableName', async () => {
let result = '';
class MyListener extends SqlParserListener {
enterTableName(ctx): void {
enterTableName = (ctx): void => {
result = ctx.getText().toLowerCase();
}
}


@ -1,4 +1,4 @@
import { GenericSQL } from '../../../src';
import GenericSQL from '../../../src/parser/generic';
describe('Generic SQL Syntax Tests', () => {
const parser = new GenericSQL();


@ -1,4 +1,5 @@
import { GenericSQL, SqlParserVisitor } from '../../../src';
import GenericSQL from '../../../src/parser/generic';
import SqlParserVisitor from '../../../src/lib/generic/SqlParserVisitor';
describe('Generic SQL Visitor Tests', () => {
const expectTableName = 'user1';
@ -9,17 +10,19 @@ describe('Generic SQL Visitor Tests', () => {
console.log('Parse error:', error);
});
console.log('Parser tree string:', parser.toString(parserTree));
test('Visitor visitTableName', () => {
let result = '';
class MyVisitor extends SqlParserVisitor {
visitTableName(ctx): void {
class MyVisitor extends SqlParserVisitor<any> {
constructor() {
super();
}
visitTableName = (ctx): void => {
result = ctx.getText().toLowerCase();
super.visitTableName(ctx);
super.visitTableName?.(ctx);
}
}
const visitor: any = new MyVisitor();
const visitor = new MyVisitor();
visitor.visit(parserTree);
expect(result).toBe(expectTableName);


@ -1,16 +1,16 @@
import { HiveSQL } from '../../../src';
import HiveSQL from '../../../src/parser/hive';
describe('HiveSQL Lexer tests', () => {
const parser = new HiveSQL();
test('select token counts', () => {
const sql = 'SELECT * FROM t1';
const tokens = parser.getAllTokens(sql);
expect(tokens.length).toBe(4);
expect(tokens.length - 1).toBe(4);
});
test('show create token counts', () => {
const sql = 'show create table_name;';
const tokens = parser.getAllTokens(sql);
expect(tokens.length).toBe(4);
expect(tokens.length - 1).toBe(4);
});
});


@ -1,4 +1,6 @@
import { HiveSQL, HiveSqlListener } from '../../../src';
import HiveSqlListener from '../../../src/lib/hive/HiveSqlListener';
import HiveSQL from '../../../src/parser/hive';
describe('Hive SQL Listener Tests', () => {
const parser = new HiveSQL();
@ -9,7 +11,7 @@ describe('Hive SQL Listener Tests', () => {
let result = '';
class MyListener extends HiveSqlListener {
enterSelect_list(ctx): void {
enterSelect_list = (ctx): void => {
result = ctx.getText();
}
}
@ -23,7 +25,7 @@ describe('Hive SQL Listener Tests', () => {
const parserTree = parser.parse(sql);
let result = '';
class MyListener extends HiveSqlListener {
enterDrop_stmt(ctx): void {
enterDrop_stmt = (ctx): void => {
result = ctx.getText();
}
}


@ -1,4 +1,4 @@
import { HiveSQL } from '../../../src';
import HiveSQL from '../../../src/parser/hive';
describe('Hive SQL Syntax Tests', () => {
const parser = new HiveSQL();

Some files were not shown because too many files have changed in this diff.