refactor: migrate antlr4 v4.12.0 to antlr4ts(4.9.0) (#106)

* build: ignore gen folder

* refactor: remove useless code

* fix: correct the Javascript usage in grammar

* refactor: move to antlr4ts

* fix: remove useless

* fix: update grammars for javascript target

* refactor: migrate to antlr4ts

* refactor: migrate to antlr4ts

* refactor: implements ParserErrorListener

* fix: rename the `start` reserved word

* refactor: remove unused import

* refactor: migrate to antlr4ts

* test: update the expects of test cases

* refactor: migrate hive to antlr4ts

* refactor: update the incompatible syntax for antlr4ts

* refactor: migrate pgsql grammar to antlr4ts, increasing tests

* refactor: migrate the plsql to antlr4ts

* build: remove unused config

* build: migrate to antlr4ts

* build: migrate ts-jest to @swc/jest

* refactor: migrate to antlr4ts

* build: migrate ts-jest to @swc/jest
This commit is contained in:
Ziv 2023-05-30 14:44:03 +08:00 committed by GitHub
parent 793ff6ef0e
commit 34f64e6bea
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
104 changed files with 436945 additions and 419757 deletions

View File

@ -65,13 +65,7 @@ jobs:
- name: Run Units Test
run: |
export NODE_OPTIONS="--max_old_space_size=4096"
npx jest test/utils
npx jest test/parser/flinksql
npx jest test/parser/spark
npx jest test/parser/generic
npx jest test/parser/hive
npx jest test/parser/pgsql
npx jest test/parser/plsql
yarn test
build:
runs-on: ubuntu-latest
needs: [setup]

Binary file not shown.

View File

@ -18,11 +18,10 @@ const entry = [
function compile(language) {
const cmd = `
java -jar ${antlr4}
-Dlanguage=TypeScript
antlr4ts
-visitor
-listener
-o ${output}/${language}
-Xexact-output-dir -o ${output}/${language}
${grammars}/${language}/*.g4
`.replace(/\n/g, '');
console.info('Executing:', cmd);

View File

@ -100,7 +100,7 @@ module.exports = {
// notifyMode: "failure-change",
// A preset that is used as a base for Jest's configuration
preset: "ts-jest/presets/js-with-ts-esm",
// preset: "ts-jest/presets/js-with-ts-esm",
// Run tests from one or more projects
// projects: undefined,
@ -171,19 +171,11 @@ module.exports = {
// A map from regular expressions to paths to transformers
transform: {
"\\.[jt]sx?$": [
"ts-jest",
{
tsconfig: {
noUnusedLocals: false,
},
},
],
"\\.[jt]sx?$": ['@swc/jest']
},
// An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
transformIgnorePatterns: ["/node_modules/.pnpm/(?!antlr4)"],
extensionsToTreatAsEsm: ['.ts', '.tsx'],
// An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
// unmockedModulePathPatterns: undefined,
@ -194,8 +186,5 @@ module.exports = {
// watchPathIgnorePatterns: [],
// Whether to use watchman for file crawling
// watchman: true,
moduleNameMapper: {
"^antlr4$": "<rootDir>/node_modules/antlr4/dist/antlr4.web.js",
},
// watchman: true
};

View File

@ -28,6 +28,8 @@
"author": "dt-insight-front",
"license": "MIT",
"devDependencies": {
"@swc/core": "^1.3.60",
"@swc/jest": "^0.2.26",
"@types/jest": "^29.5.1",
"@types/node": "^18.15.11",
"@typescript-eslint/eslint-plugin": "^3.10.1",
@ -36,7 +38,6 @@
"eslint-config-google": "^0.14.0",
"jest": "^29.5.0",
"standard-version": "^9.5.0",
"ts-jest": "^29.1.0",
"typescript": "^5.0.4",
"yargs-parser": "^21.1.1"
},
@ -46,6 +47,7 @@
"registry": "https://registry.npmjs.org/"
},
"dependencies": {
"antlr4": "^4.12.0"
"antlr4ts": "^0.5.0-alpha.4",
"antlr4ts-cli": "^0.5.0-alpha.4"
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,441 +1,553 @@
/**
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
lexer grammar HiveSqlLexer;
// Lexer rules
T_ACTION : A C T I O N ;
T_ADD2 : A D D ;
T_ALL : A L L ;
T_ALLOCATE : A L L O C A T E ;
T_ALTER : A L T E R ;
T_AND : A N D ;
T_ANSI_NULLS : A N S I '_' N U L L S ;
T_ANSI_PADDING : A N S I '_' P A D D I N G ;
T_AS : A S ;
T_ASC : A S C ;
T_ASSOCIATE : A S S O C I A T E ;
T_AT : A T ;
T_AUTO_INCREMENT : A U T O '_' I N C R E M E N T ;
T_AVG : A V G ;
T_BATCHSIZE : B A T C H S I Z E ;
T_BEGIN : B E G I N ;
T_BETWEEN : B E T W E E N ;
T_BIGINT : B I G I N T ;
T_BINARY_DOUBLE : B I N A R Y '_' D O U B L E ;
T_BINARY_FLOAT : B I N A R Y '_' F L O A T ;
T_BINARY_INTEGER : B I N A R Y '_' I N T E G E R ;
T_BIT : B I T ;
T_BODY : B O D Y ;
T_BREAK : B R E A K ;
T_BY : B Y ;
T_BYTE : B Y T E ;
T_CALL : C A L L ;
T_CALLER : C A L L E R ;
T_CASCADE : C A S C A D E ;
T_CASE : C A S E ;
T_CASESPECIFIC : C A S E S P E C I F I C ;
T_CAST : C A S T ;
T_CHAR : C H A R ;
T_CHARACTER : C H A R A C T E R ;
T_CHARSET : C H A R S E T ;
T_CLIENT : C L I E N T ;
T_CLOSE : C L O S E ;
T_CLUSTERED : C L U S T E R E D;
T_CMP : C M P ;
T_COLLECT : C O L L E C T ;
T_COLLECTION : C O L L E C T I O N ;
T_COLUMN : C O L U M N ;
T_COMMENT : C O M M E N T;
T_CONSTANT : C O N S T A N T ;
T_COMMIT : C O M M I T ;
T_COMPRESS : C O M P R E S S ;
T_CONCAT : C O N C A T;
T_CONDITION : C O N D I T I O N ;
T_CONSTRAINT : C O N S T R A I N T ;
T_CONTINUE : C O N T I N U E ;
T_COPY : C O P Y ;
T_COUNT : C O U N T ;
T_COUNT_BIG : C O U N T '_' B I G;
T_CREATE : C R E A T E ;
T_CREATION : C R E A T I O N ;
T_CREATOR : C R E A T O R ;
T_CS : C S;
T_CURRENT : C U R R E N T ;
T_CURRENT_SCHEMA : C U R R E N T '_' S C H E M A ;
T_CURSOR : C U R S O R ;
T_DATABASE : D A T A B A S E ;
T_DATA : D A T A ;
T_DATE : D A T E ;
T_DATETIME : D A T E T I M E ;
T_DAY : D A Y ;
T_DAYS : D A Y S ;
T_DEC : D E C ;
T_DECIMAL : D E C I M A L ;
T_DECLARE : D E C L A R E ;
T_DEFAULT : D E F A U L T ;
T_DEFERRED : D E F E R R E D ;
T_DEFINED : D E F I N E D ;
T_DEFINER : D E F I N E R ;
T_DEFINITION : D E F I N I T I O N ;
T_DELETE : D E L E T E ;
T_DELIMITED : D E L I M I T E D ;
T_DELIMITER : D E L I M I T E R ;
T_DESC : D E S C ;
T_DESCRIBE : D E S C R I B E ;
T_DIAGNOSTICS : D I A G N O S T I C S ;
T_DIR : D I R ;
T_DIRECTORY : D I R E C T O R Y ;
T_DISTINCT : D I S T I N C T ;
T_DISTRIBUTE : D I S T R I B U T E ;
T_DO : D O ;
T_DOUBLE : D O U B L E ;
T_DROP : D R O P ;
T_DYNAMIC : D Y N A M I C ;
T_ELSE : E L S E ;
T_ELSEIF : E L S E I F ;
T_ELSIF : E L S I F ;
T_ENABLE : E N A B L E ;
T_END : E N D ;
T_ENGINE : E N G I N E ;
T_ESCAPED : E S C A P E D ;
T_EXCEPT : E X C E P T ;
T_EXEC : E X E C ;
T_EXECUTE : E X E C U T E ;
T_EXCEPTION : E X C E P T I O N ;
T_EXCLUSIVE : E X C L U S I V E ;
T_EXISTS : E X I S T S ;
T_EXIT : E X I T ;
T_FALLBACK : F A L L B A C K ;
T_FALSE : F A L S E ;
T_FETCH : F E T C H ;
T_FIELDS : F I E L D S ;
T_FILE : F I L E ;
T_FILES : F I L E S ;
T_FLOAT : F L O A T ;
T_FOR : F O R ;
T_FOREIGN : F O R E I G N ;
T_FORMAT : F O R M A T ;
T_FOUND : F O U N D ;
T_FROM : F R O M ;
T_FULL : F U L L ;
T_FUNCTION : F U N C T I O N ;
T_GET : G E T ;
T_GLOBAL : G L O B A L ;
T_GO : G O ;
T_GRANT : G R A N T ;
T_GROUP : G R O U P ;
T_HANDLER : H A N D L E R ;
T_HASH : H A S H ;
T_HAVING : H A V I N G ;
T_HDFS : H D F S ;
T_HIVE : H I V E ;
T_HOST : H O S T ;
T_IDENTITY : I D E N T I T Y ;
T_IF : I F ;
T_IGNORE : I G N O R E ;
T_IMMEDIATE : I M M E D I A T E ;
T_IN : I N ;
T_INCLUDE : I N C L U D E ;
T_INDEX : I N D E X ;
T_INITRANS : I N I T R A N S ;
T_INNER : I N N E R ;
T_INOUT : I N O U T;
T_INSERT : I N S E R T ;
T_INT : I N T ;
T_INT2 : I N T '2';
T_INT4 : I N T '4';
T_INT8 : I N T '8';
T_INTEGER : I N T E G E R ;
T_INTERSECT : I N T E R S E C T ;
T_INTERVAL : I N T E R V A L ;
T_INTO : I N T O ;
T_INVOKER : I N V O K E R ;
T_IS : I S ;
T_ISOPEN : I S O P E N ;
T_ITEMS : I T E M S ;
T_JOIN : J O I N ;
T_KEEP : K E E P;
T_KEY : K E Y ;
T_KEYS : K E Y S ;
T_LANGUAGE : L A N G U A G E ;
T_LEAVE : L E A V E ;
T_LEFT : L E F T ;
T_LIKE : L I K E ;
T_LIMIT : L I M I T ;
T_LINES : L I N E S ;
T_LOCAL : L O C A L ;
T_LOCATION : L O C A T I O N ;
T_LOCATOR : L O C A T O R ;
T_LOCATORS : L O C A T O R S ;
T_LOCKS : L O C K S ;
T_LOG : L O G ;
T_LOGGED : L O G G E D ;
T_LOGGING : L O G G I N G ;
T_LOOP : L O O P ;
T_MAP : M A P ;
T_MATCHED : M A T C H E D ;
T_MAX : M A X ;
T_MAXTRANS : M A X T R A N S ;
T_MERGE : M E R G E ;
T_MESSAGE_TEXT : M E S S A G E '_' T E X T ;
T_MICROSECOND : M I C R O S E C O N D ;
T_MICROSECONDS : M I C R O S E C O N D S;
T_MIN : M I N ;
T_MULTISET : M U L T I S E T ;
T_NCHAR : N C H A R ;
T_NEW : N E W ;
T_NVARCHAR : N V A R C H A R ;
T_NO : N O ;
T_NOCOUNT : N O C O U N T ;
T_NOCOMPRESS : N O C O M P R E S S ;
T_NOLOGGING : N O L O G G I N G ;
T_NONE : N O N E ;
T_NOT : N O T ;
T_NOTFOUND : N O T F O U N D ;
T_NULL : N U L L ;
T_NUMERIC : N U M E R I C ;
T_NUMBER : N U M B E R ;
T_OBJECT : O B J E C T ;
T_OFF : O F F ;
T_ON : O N ;
T_ONLY : O N L Y ;
T_OPEN : O P E N ;
T_OR : O R ;
T_ORDER : O R D E R;
T_OUT : O U T ;
T_OUTER : O U T E R ;
T_OVER : O V E R ;
T_OVERWRITE : O V E R W R I T E ;
T_OWNER : O W N E R ;
T_PACKAGE : P A C K A G E ;
T_PARTITION : P A R T I T I O N ;
T_PCTFREE : P C T F R E E ;
T_PCTUSED : P C T U S E D ;
T_PLS_INTEGER : P L S '_' I N T E G E R ;
T_PRECISION : P R E C I S I O N ;
T_PRESERVE : P R E S E R V E ;
T_PRIMARY : P R I M A R Y ;
T_PRINT : P R I N T ;
T_PROC : P R O C ;
T_PROCEDURE : P R O C E D U R E ;
T_QUALIFY : Q U A L I F Y ;
T_QUERY_BAND : Q U E R Y '_' B A N D ;
T_QUIT : Q U I T ;
T_QUOTED_IDENTIFIER : Q U O T E D '_' I D E N T I F I E R ;
T_RAISE : R A I S E ;
T_REAL : R E A L ;
T_REFERENCES : R E F E R E N C E S ;
T_REGEXP : R E G E X P ;
T_REPLACE : R E P L A C E ;
T_RESIGNAL : R E S I G N A L ;
T_RESTRICT : R E S T R I C T ;
T_RESULT : R E S U L T ;
T_RESULT_SET_LOCATOR : R E S U L T '_' S E T '_' L O C A T O R ;
T_RETURN : R E T U R N ;
T_RETURNS : R E T U R N S ;
T_REVERSE : R E V E R S E ;
T_RIGHT : R I G H T ;
T_RLIKE : R L I K E ;
T_ROLE : R O L E ;
T_ROLLBACK : R O L L B A C K ;
T_ROW : R O W ;
T_ROWS : R O W S ;
T_ROWTYPE : R O W T Y P E ;
T_ROW_COUNT : R O W '_' C O U N T ;
T_RR : R R;
T_RS : R S ;
T_PWD : P W D ;
T_TRIM : T R I M ;
T_SCHEMA : S C H E M A ;
T_SECOND : S E C O N D ;
T_SECONDS : S E C O N D S;
T_SECURITY : S E C U R I T Y ;
T_SEGMENT : S E G M E N T ;
T_SEL : S E L ;
T_SELECT : S E L E C T ;
T_SET : S E T ;
T_SESSION : S E S S I O N ;
T_SESSIONS : S E S S I O N S ;
T_SETS : S E T S;
T_SHARE : S H A R E ;
T_SIGNAL : S I G N A L ;
T_SIMPLE_DOUBLE : S I M P L E '_' D O U B L E ;
T_SIMPLE_FLOAT : S I M P L E '_' F L O A T ;
T_SIMPLE_INTEGER : S I M P L E '_' I N T E G E R ;
T_SMALLDATETIME : S M A L L D A T E T I M E ;
T_SMALLINT : S M A L L I N T ;
T_SQL : S Q L ;
T_SQLEXCEPTION : S Q L E X C E P T I O N ;
T_SQLINSERT : S Q L I N S E R T ;
T_SQLSTATE : S Q L S T A T E ;
T_SQLWARNING : S Q L W A R N I N G ;
T_STATS : S T A T S ;
T_STATISTICS : S T A T I S T I C S ;
T_STEP : S T E P ;
T_STORAGE : S T O R A G E ;
T_STORED : S T O R E D ;
T_STRING : S T R I N G ;
T_SUBDIR : S U B D I R ;
T_SUBSTRING : S U B S T R I N G ;
T_SUM : S U M ;
T_SUMMARY : S U M M A R Y ;
T_SYS_REFCURSOR : S Y S '_' R E F C U R S O R ;
T_TABLE : T A B L E ;
T_TABLESPACE : T A B L E S P A C E ;
T_TEMPORARY : T E M P O R A R Y ;
T_TERMINATED : T E R M I N A T E D ;
T_TEXTIMAGE_ON : T E X T I M A G E '_' O N ;
T_THEN : T H E N ;
T_TIMESTAMP : T I M E S T A M P ;
T_TINYINT : T I N Y I N T ;
T_TITLE : T I T L E ;
T_TO : T O ;
T_TOP : T O P ;
T_TRANSACTION : T R A N S A C T I O N ;
T_TRUE : T R U E ;
T_TRUNCATE : T R U N C A T E;
T_TYPE : T Y P E ;
T_UNION : U N I O N ;
T_UNIQUE : U N I Q U E ;
T_UPDATE : U P D A T E ;
T_UR : U R ;
T_USE : U S E ;
T_USING : U S I N G ;
T_VALUE : V A L U E ;
T_VALUES : V A L U E S ;
T_VAR : V A R ;
T_VARCHAR : V A R C H A R ;
T_VARCHAR2 : V A R C H A R '2' ;
T_VARYING : V A R Y I N G ;
T_VOLATILE : V O L A T I L E ;
T_WHEN : W H E N ;
T_WHERE : W H E R E ;
T_WHILE : W H I L E ;
T_WITH : W I T H ;
T_WITHOUT : W I T H O U T ;
T_WORK : W O R K ;
T_XACT_ABORT : X A C T '_' A B O R T ;
T_XML : X M L ;
T_YES : Y E S ;
options { caseInsensitive = true; }
// Functions with specific syntax
T_ACTIVITY_COUNT : A C T I V I T Y '_' C O U N T ;
T_CUME_DIST : C U M E '_' D I S T ;
T_CURRENT_DATE : C U R R E N T '_' D A T E ;
T_CURRENT_TIMESTAMP : C U R R E N T '_' T I M E S T A M P ;
T_CURRENT_USER : C U R R E N T '_' U S E R ;
T_DENSE_RANK : D E N S E '_' R A N K ;
T_FIRST_VALUE : F I R S T '_' V A L U E;
T_LAG : L A G ;
T_LAST_VALUE : L A S T '_' V A L U E;
T_LEAD : L E A D ;
T_MAX_PART_STRING : M A X '_' P A R T '_' S T R I N G ;
T_MIN_PART_STRING : M I N '_' P A R T '_' S T R I N G ;
T_MAX_PART_INT : M A X '_' P A R T '_' I N T ;
T_MIN_PART_INT : M I N '_' P A R T '_' I N T ;
T_MAX_PART_DATE : M A X '_' P A R T '_' D A T E ;
T_MIN_PART_DATE : M I N '_' P A R T '_' D A T E ;
T_PART_COUNT : P A R T '_' C O U N T ;
T_PART_LOC : P A R T '_' L O C ;
T_RANK : R A N K ;
T_ROW_NUMBER : R O W '_' N U M B E R;
T_STDEV : S T D E V ;
T_SYSDATE : S Y S D A T E ;
T_VARIANCE : V A R I A N C E ;
T_USER : U S E R;
// Keywords
KW_ABORT : 'ABORT';
KW_ACTIVATE : 'ACTIVATE';
KW_ACTIVE : 'ACTIVE';
KW_ADD : 'ADD';
KW_ADMIN : 'ADMIN';
KW_AFTER : 'AFTER';
KW_ALL : 'ALL';
KW_ALLOC_FRACTION : 'ALLOC_FRACTION';
KW_ALTER : 'ALTER';
KW_ANALYZE : 'ANALYZE';
KW_AND : 'AND';
KW_ANTI : 'ANTI';
KW_ANY : 'ANY';
KW_APPLICATION : 'APPLICATION';
KW_ARCHIVE : 'ARCHIVE';
KW_ARRAY : 'ARRAY';
KW_AS : 'AS';
KW_ASC : 'ASC';
KW_AST : 'AST';
KW_AT : 'AT';
KW_AUTHORIZATION : 'AUTHORIZATION';
KW_AUTOCOMMIT : 'AUTOCOMMIT';
KW_BATCH : 'KW_BATCH';
KW_BEFORE : 'BEFORE';
KW_BETWEEN : 'BETWEEN';
KW_BIGINT : 'BIGINT';
KW_BINARY : 'BINARY';
KW_BOOLEAN : 'BOOLEAN';
KW_BOTH : 'BOTH';
KW_BUCKET : 'BUCKET';
KW_BUCKETS : 'BUCKETS';
KW_BY : 'BY';
KW_CACHE : 'CACHE';
KW_CASCADE : 'CASCADE';
KW_CASE : 'CASE';
KW_CAST : 'CAST';
KW_CBO : 'CBO';
KW_CHANGE : 'CHANGE';
KW_CHAR : 'CHAR';
KW_CHECK : 'CHECK';
KW_CLUSTER : 'CLUSTER';
KW_CLUSTERED : 'CLUSTERED';
KW_CLUSTERSTATUS : 'CLUSTERSTATUS';
KW_COLLECTION : 'COLLECTION';
KW_COLUMN : 'COLUMN';
KW_COLUMNS : 'COLUMNS';
KW_COMMENT : 'COMMENT';
KW_COMMIT : 'COMMIT';
KW_COMPACT : 'COMPACT';
KW_COMPACTIONS : 'COMPACTIONS';
KW_COMPACT_ID : 'COMPACTIONID';
KW_COMPUTE : 'COMPUTE';
KW_CONCATENATE : 'CONCATENATE';
KW_CONF : 'CONF';
KW_CONSTRAINT : 'CONSTRAINT';
KW_CONTINUE : 'CONTINUE';
KW_COST : 'COST';
KW_CREATE : 'CREATE';
KW_CRON : 'CRON';
KW_CROSS : 'CROSS';
KW_CUBE : 'CUBE';
KW_CURRENT : 'CURRENT';
KW_CURRENT_DATE : 'CURRENT_DATE';
KW_CURRENT_TIMESTAMP : 'CURRENT_TIMESTAMP';
KW_CURSOR : 'CURSOR';
KW_DATA : 'DATA';
KW_DATABASE : 'DATABASE';
KW_DATABASES : 'DATABASES';
KW_DATACONNECTOR : 'CONNECTOR';
KW_DATACONNECTORS : 'CONNECTORS';
KW_DATE : 'DATE';
KW_DATETIME : 'DATETIME';
KW_DAY : 'DAY' 'S'?;
KW_DAYOFWEEK : 'KW_DAYOFWEEK';
KW_DBPROPERTIES : 'DBPROPERTIES';
KW_DCPROPERTIES : 'DCPROPERTIES';
KW_DDL : 'DDL';
KW_DEBUG : 'DEBUG';
KW_DECIMAL : 'DEC' 'IMAL'? | 'NUMERIC';
KW_DEFAULT : 'DEFAULT';
KW_DEFERRED : 'DEFERRED';
KW_DEFINED : 'DEFINED';
KW_DELETE : 'DELETE';
KW_DELIMITED : 'DELIMITED';
KW_DEPENDENCY : 'DEPENDENCY';
KW_DESC : 'DESC';
KW_DESCRIBE : 'DESCRIBE';
KW_DETAIL : 'DETAIL';
KW_DIRECTORIES : 'DIRECTORIES';
KW_DIRECTORY : 'DIRECTORY';
KW_DISABLE : 'DISABLE' 'D'?;
KW_DISTINCT : 'DISTINCT';
KW_DISTRIBUTE : 'DISTRIBUTE';
KW_DISTRIBUTED : 'DISTRIBUTED';
KW_DO : 'DO';
KW_DOUBLE : 'DOUBLE';
KW_DOW : 'DAYOFWEEK';
KW_DROP : 'DROP';
KW_DUMP : 'DUMP';
KW_ELEM_TYPE : '$ELEM$';
KW_ELSE : 'ELSE';
KW_ENABLE : 'ENABLE' 'D'?;
KW_END : 'END';
KW_ENFORCED : 'ENFORCED';
KW_ESCAPED : 'ESCAPED';
KW_EVERY : 'EVERY';
KW_EXCEPT : 'EXCEPT';
KW_EXCHANGE : 'EXCHANGE';
KW_EXCLUSIVE : 'EXCLUSIVE';
KW_EXECUTE : 'EXECUTE';
KW_EXECUTED : 'EXECUTED';
KW_EXISTS : 'EXISTS';
KW_EXPIRE_SNAPSHOTS : 'EXPIRE_SNAPSHOTS';
KW_EXPLAIN : 'EXPLAIN';
KW_EXPORT : 'EXPORT';
KW_EXPRESSION : 'EXPRESSION';
KW_EXTENDED : 'EXTENDED';
KW_EXTERNAL : 'EXTERNAL';
KW_EXTRACT : 'EXTRACT';
KW_FALSE : 'FALSE';
KW_FETCH : 'FETCH';
KW_FIELDS : 'FIELDS';
KW_FILE : 'FILE';
KW_FILEFORMAT : 'FILEFORMAT';
KW_FIRST : 'FIRST';
KW_FLOAT : 'FLOAT';
KW_FLOOR : 'FLOOR';
KW_FOLLOWING : 'FOLLOWING';
KW_FOR : 'FOR';
KW_FORCE : 'FORCE';
KW_FOREIGN : 'FOREIGN';
KW_FORMAT : 'FORMAT';
KW_FORMATTED : 'FORMATTED';
KW_FROM : 'FROM';
KW_FULL : 'FULL';
KW_FUNCTION : 'FUNCTION';
KW_FUNCTIONS : 'FUNCTIONS';
KW_GRANT : 'GRANT';
KW_GROUP : 'GROUP';
KW_GROUPING : 'GROUPING';
KW_HAVING : 'HAVING';
KW_HOLD_DDLTIME : 'KW_HOLD_DDLTIME';
KW_HOUR : 'HOUR' 'S'?;
KW_IDXPROPERTIES : 'IDXPROPERTIES';
KW_IF : 'IF';
KW_IGNORE : 'IGNORE';
KW_IMPORT : 'IMPORT';
KW_IN : 'IN';
KW_INDEX : 'INDEX';
KW_INDEXES : 'INDEXES';
KW_INNER : 'INNER';
KW_INPATH : 'INPATH';
KW_INPUTDRIVER : 'INPUTDRIVER';
KW_INPUTFORMAT : 'INPUTFORMAT';
KW_INSERT : 'INSERT';
KW_INT : 'INT' 'EGER'?;
KW_INTERSECT : 'INTERSECT';
KW_INTERVAL : 'INTERVAL';
KW_INTO : 'INTO';
KW_IS : 'IS';
KW_ISOLATION : 'ISOLATION';
KW_ITEMS : 'ITEMS';
KW_JAR : 'JAR';
KW_JOIN : 'JOIN';
KW_JOINCOST : 'JOINCOST';
KW_KEY : 'KEY';
KW_KEYS : 'KEYS';
KW_KEY_TYPE : '$KEY$';
KW_KILL : 'KILL';
KW_LAST : 'LAST';
KW_LATERAL : 'LATERAL';
KW_LEADING : 'LEADING';
KW_LEFT : 'LEFT';
KW_LESS : 'LESS';
KW_LEVEL : 'LEVEL';
KW_LIKE : 'LIKE';
KW_LIMIT : 'LIMIT';
KW_LINES : 'LINES';
KW_LOAD : 'LOAD';
KW_LOCAL : 'LOCAL';
KW_LOCATION : 'LOCATION';
KW_LOCK : 'LOCK';
KW_LOCKS : 'LOCKS';
KW_LOGICAL : 'LOGICAL';
KW_LONG : 'LONG';
KW_MACRO : 'MACRO';
KW_MANAGED : 'MANAGED';
KW_MANAGEDLOCATION : 'MANAGEDLOCATION';
KW_MANAGEMENT : 'MANAGEMENT';
KW_MAP : 'MAP';
KW_MAPJOIN : 'MAPJOIN';
KW_MAPPING : 'MAPPING';
KW_MATCHED : 'MATCHED';
KW_MATERIALIZED : 'MATERIALIZED';
KW_MERGE : 'MERGE';
KW_METADATA : 'METADATA';
KW_MINUS : 'MINUS';
KW_MINUTE : 'MINUTE' 'S'?;
KW_MONTH : 'MONTH' 'S'?;
KW_MORE : 'MORE';
KW_MOVE : 'MOVE';
KW_MSCK : 'MSCK';
KW_NONE : 'NONE';
KW_NORELY : 'NORELY';
KW_NOSCAN : 'NOSCAN';
KW_NOT : 'NOT' | '!';
KW_NOVALIDATE : 'NOVALIDATE';
KW_NO_DROP : 'KW_NO_DROP';
KW_NULL : 'NULL';
KW_NULLS : 'NULLS';
KW_OF : 'OF';
KW_OFFLINE : 'KW_OFFLINE';
KW_OFFSET : 'OFFSET';
KW_ON : 'ON';
KW_ONLY : 'ONLY';
KW_OPERATOR : 'OPERATOR';
KW_OPTION : 'OPTION';
KW_OR : 'OR';
KW_ORDER : 'ORDER';
KW_OUT : 'OUT';
KW_OUTER : 'OUTER';
KW_OUTPUTDRIVER : 'OUTPUTDRIVER';
KW_OUTPUTFORMAT : 'OUTPUTFORMAT';
KW_OVER : 'OVER';
KW_OVERWRITE : 'OVERWRITE';
KW_OWNER : 'OWNER';
KW_PARTITION : 'PARTITION';
KW_PARTITIONED : 'PARTITIONED';
KW_PARTITIONS : 'PARTITIONS';
KW_PATH : 'PATH';
KW_PERCENT : 'PERCENT';
KW_PKFK_JOIN : 'PKFK_JOIN';
KW_PLAN : 'PLAN';
KW_PLANS : 'PLANS';
KW_PLUS : 'PLUS';
KW_POOL : 'POOL';
KW_PRECEDING : 'PRECEDING';
KW_PRECISION : 'PRECISION';
KW_PREPARE : 'PREPARE';
KW_PRESERVE : 'PRESERVE';
KW_PRIMARY : 'PRIMARY';
KW_PRINCIPALS : 'PRINCIPALS';
KW_PROCEDURE : 'PROCEDURE';
KW_PROTECTION : 'KW_PROTECTION';
KW_PURGE : 'PURGE';
KW_QUALIFY : 'QUALIFY';
KW_QUARTER : 'QUARTER';
KW_QUERY : 'QUERY';
KW_QUERY_PARALLELISM : 'QUERY_PARALLELISM';
KW_RANGE : 'RANGE';
KW_READ : 'READ';
KW_READONLY : 'KW_READONLY';
KW_READS : 'READS';
KW_REAL : 'REAL';
KW_REBUILD : 'REBUILD';
KW_RECORDREADER : 'RECORDREADER';
KW_RECORDWRITER : 'RECORDWRITER';
KW_REDUCE : 'REDUCE';
KW_REFERENCES : 'REFERENCES';
KW_REGEXP : 'REGEXP';
KW_RELOAD : 'RELOAD';
KW_RELY : 'RELY';
KW_REMOTE : 'REMOTE';
KW_RENAME : 'RENAME';
KW_REOPTIMIZATION : 'REOPTIMIZATION';
KW_REPAIR : 'REPAIR';
KW_REPL : 'REPL';
KW_REPLACE : 'REPLACE';
KW_REPLICATION : 'REPLICATION';
KW_RESOURCE : 'RESOURCE';
KW_RESPECT : 'RESPECT';
KW_RESTRICT : 'RESTRICT';
KW_REVOKE : 'REVOKE';
KW_REWRITE : 'REWRITE';
KW_RIGHT : 'RIGHT';
KW_RLIKE : 'RLIKE';
KW_ROLE : 'ROLE';
KW_ROLES : 'ROLES';
KW_ROLLBACK : 'ROLLBACK';
KW_ROLLUP : 'ROLLUP';
KW_ROW : 'ROW';
KW_ROWS : 'ROWS';
KW_SCHEDULED : 'SCHEDULED';
KW_SCHEDULING_POLICY : 'SCHEDULING_POLICY';
KW_SCHEMA : 'SCHEMA';
KW_SCHEMAS : 'SCHEMAS';
KW_SECOND : 'SECOND' 'S'?;
KW_SELECT : 'SELECT';
KW_SEMI : 'SEMI';
KW_SERDE : 'SERDE';
KW_SERDEPROPERTIES : 'SERDEPROPERTIES';
KW_SERVER : 'SERVER';
KW_SET : 'SET';
KW_SETS : 'SETS';
KW_SET_CURRENT_SNAPSHOT : 'SET_CURRENT_SNAPSHOT';
KW_SHARED : 'SHARED';
KW_SHOW : 'SHOW';
KW_SHOW_DATABASE : 'SHOW_DATABASE';
KW_SKEWED : 'SKEWED';
KW_SMALLINT : 'SMALLINT';
KW_SNAPSHOT : 'SNAPSHOT';
KW_SOME : 'SOME';
KW_SORT : 'SORT';
KW_SORTED : 'SORTED';
KW_SPEC : 'SPEC';
KW_SSL : 'SSL';
KW_START : 'START';
KW_STATISTICS : 'STATISTICS';
KW_STATUS : 'STATUS';
KW_STORED : 'STORED';
KW_STREAMTABLE : 'STREAMTABLE';
KW_STRING : 'STRING';
KW_STRUCT : 'STRUCT';
KW_SUMMARY : 'SUMMARY';
KW_SYNC : 'SYNC';
KW_SYSTEM_TIME : 'SYSTEM_TIME';
KW_SYSTEM_VERSION : 'SYSTEM_VERSION';
KW_TABLE : 'TABLE';
KW_TABLES : 'TABLES';
KW_TABLESAMPLE : 'TABLESAMPLE';
KW_TBLPROPERTIES : 'TBLPROPERTIES';
KW_TEMPORARY : 'TEMPORARY';
KW_TERMINATED : 'TERMINATED';
KW_THEN : 'THEN';
KW_TIME : 'TIME';
KW_TIMESTAMP : 'TIMESTAMP';
KW_TIMESTAMPLOCALTZ : 'TIMESTAMPLOCALTZ';
KW_TIMESTAMPTZ : 'KW_TIMESTAMPTZ';
KW_TINYINT : 'TINYINT';
KW_TO : 'TO';
KW_TOUCH : 'TOUCH';
KW_TRAILING : 'TRAILING';
KW_TRANSACTION : 'TRANSACTION';
KW_TRANSACTIONAL : 'TRANSACTIONAL';
KW_TRANSACTIONS : 'TRANSACTIONS';
KW_TRANSFORM : 'TRANSFORM';
KW_TRIGGER : 'TRIGGER';
KW_TRIM : 'TRIM';
KW_TRUE : 'TRUE';
KW_TRUNCATE : 'TRUNCATE';
KW_TYPE : 'TYPE';
KW_UNARCHIVE : 'UNARCHIVE';
KW_UNBOUNDED : 'UNBOUNDED';
KW_UNDO : 'UNDO';
KW_UNION : 'UNION';
KW_UNIONTYPE : 'UNIONTYPE';
KW_UNIQUE : 'UNIQUE';
KW_UNIQUEJOIN : 'UNIQUEJOIN';
KW_UNKNOWN : 'UNKNOWN';
KW_UNLOCK : 'UNLOCK';
KW_UNMANAGED : 'UNMANAGED';
KW_UNSET : 'UNSET';
KW_UNSIGNED : 'UNSIGNED';
KW_UPDATE : 'UPDATE';
KW_URI : 'URI';
KW_URL : 'URL';
KW_USE : 'USE';
KW_USER : 'USER';
KW_USING : 'USING';
KW_UTC : 'UTC';
KW_UTCTIMESTAMP : 'UTC_TMESTAMP';
KW_VALIDATE : 'VALIDATE';
KW_VALUES : 'VALUES';
KW_VALUE_TYPE : '$VALUE$';
KW_VARCHAR : 'VARCHAR';
KW_VECTORIZATION : 'VECTORIZATION';
KW_VIEW : 'VIEW';
KW_VIEWS : 'VIEWS';
KW_WAIT : 'WAIT';
KW_WEEK : 'WEEK' 'S'?;
KW_WHEN : 'WHEN';
KW_WHERE : 'WHERE';
KW_WHILE : 'WHILE';
KW_WINDOW : 'WINDOW';
KW_WITH : 'WITH';
KW_WITHIN : 'WITHIN';
KW_WORK : 'WORK';
KW_WORKLOAD : 'WORKLOAD';
KW_WRITE : 'WRITE';
KW_YEAR : 'YEAR' 'S'?;
KW_ZONE : 'ZONE';
T_ADD : '+' ;
T_COLON : ':' ;
T_COMMA : ',' ;
T_PIPE : '||' ;
T_DIV : '/' ;
T_DOT : '.' ;
T_DOT2 : '..' ;
T_EQUAL : '=' ;
T_EQUAL2 : '==' ;
T_SHARP : '#' ;
T_NOTE : '!' ;
T_NOTEQUAL : '<>' ;
T_NOTEQUAL2 : '!=' ;
T_GREATER : '>' ;
T_GREATEREQUAL : '>=' ;
T_LESS : '<' ;
T_LESSEQUAL : '<=' ;
T_MUL : '*' ;
T_PRECENT : '%' ;
T_CALLS : '@' ;
T_OPEN_B : '{' ;
T_OPEN_P : '(' ;
T_OPEN_SB : '[' ;
T_CLOSE_B : '}' ;
T_CLOSE_P : ')' ;
T_CLOSE_SB : ']' ;
T_SEMICOLON : ';' ;
T_SUB : '-' ;
// Operators
// NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.
DOT : '.'; // generated as a part of Number rule
COLON : ':' ;
COMMA : ',' ;
SEMICOLON : ';' ;
L_ID : L_ID_PART // Identifier
;
L_S_STRING : '\'' (('\'' '\'') | ('\\' '\'') | ~('\''))* '\'' // Single quoted string literal
;
L_D_STRING : '"' (L_STR_ESC_D | .)*? '"' // Double quoted string literal
;
L_INT : L_DIGIT+ ; // Integer
L_DEC : L_DIGIT+ '.' ~'.' L_DIGIT* // Decimal number
| '.' L_DIGIT+
;
L_WS : L_BLANK+ -> skip ; // Whitespace
L_M_COMMENT : '/*' .*? '*/' -> channel(HIDDEN) ; // Multiline comment
L_S_COMMENT : ('--' | '//') .*? '\r'? '\n' -> channel(HIDDEN) ; // Single line comment
LPAREN : '(' ;
RPAREN : ')' ;
LSQUARE : '[' ;
RSQUARE : ']' ;
LCURLY : '{';
RCURLY : '}';
L_FILE : ([a-zA-Z] ':' '\\'?)? L_ID ('\\' L_ID)* // File path (a/b/c Linux path causes conflicts with division operator and handled at parser level)
EQUAL : '=' | '==';
EQUAL_NS : '<=>';
NOTEQUAL : '<>' | '!=';
LESSTHANOREQUALTO : '<=';
LESSTHAN : '<';
GREATERTHANOREQUALTO : '>=';
GREATERTHAN : '>';
DIVIDE : '/';
PLUS : '+';
MINUS : '-';
STAR : '*';
MOD : '%';
DIV : 'DIV';
AMPERSAND : '&';
TILDE : '~';
BITWISEOR : '|';
CONCATENATE : '||';
BITWISEXOR : '^';
QUESTION : '?';
DOLLAR : '$';
// LITERALS
StringLiteral
: ( '\'' ( ~('\''|'\\') | ('\\' .) )* '\''
| '"' ( ~('"'|'\\') | ('\\' .) )* '"'
)+
;
L_LABEL : ([a-zA-Z] | L_DIGIT | '_')* ':'
CharSetLiteral
: StringLiteral
| '0' 'X' (HexDigit | Digit)+
;
IntegralLiteral
: Digit+ ('L' | 'S' | 'Y')
;
NumberLiteral
: Number ('B'? 'D')
;
ByteLengthLiteral
: Digit+ [BKMG]
;
Number
: Digit+ (DOT Digit* Exponent? | Exponent)?
;
/*
An Identifier can be:
- tableName
- columnName
- select expr alias
- lateral view aliases
- database name
- view name
- subquery alias
- function name
- ptf argument identifier
- index name
- property name for: db,tbl,partition...
- fileFormat
- role name
- privilege name
- principal name
- macro name
- hint name
- window name
*/
Identifier
: (Letter | Digit) (Letter | Digit | '_')*
| QuotedIdentifier
| '`' RegexComponent+ '`'
;
fragment
L_ID_PART :
[a-zA-Z] ([a-zA-Z] | L_DIGIT | '_')* // Identifier part
| '$' '{' .*? '}'
| ('_' | '@' | ':' | '#' | '$') ([a-zA-Z] | L_DIGIT | '_' | '@' | ':' | '#' | '$')+ // (at least one char must follow special char)
| '"' .*? '"' // Quoted identifiers
| '[' .*? ']'
| '`' .*? '`'
;
fragment
L_STR_ESC_D : // Double quoted string escape sequence
'""' | '\\"'
;
fragment
L_DIGIT : [0-9] // Digit
;
fragment
L_BLANK : (' ' | '\t' | '\r' | '\n')
QuotedIdentifier
: '`' ('``' | ~'`')* '`'
;
// Support case-insensitive keywords and allowing case-sensitive identifiers
fragment A : ('a'|'A') ;
fragment B : ('b'|'B') ;
fragment C : ('c'|'C') ;
fragment D : ('d'|'D') ;
fragment E : ('e'|'E') ;
fragment F : ('f'|'F') ;
fragment G : ('g'|'G') ;
fragment H : ('h'|'H') ;
fragment I : ('i'|'I') ;
fragment J : ('j'|'J') ;
fragment K : ('k'|'K') ;
fragment L : ('l'|'L') ;
fragment M : ('m'|'M') ;
fragment N : ('n'|'N') ;
fragment O : ('o'|'O') ;
fragment P : ('p'|'P') ;
fragment Q : ('q'|'Q') ;
fragment R : ('r'|'R') ;
fragment S : ('s'|'S') ;
fragment T : ('t'|'T') ;
fragment U : ('u'|'U') ;
fragment V : ('v'|'V') ;
fragment W : ('w'|'W') ;
fragment X : ('x'|'X') ;
fragment Y : ('y'|'Y') ;
fragment Z : ('z'|'Z') ;
fragment
Letter
: 'A'..'Z'
;
fragment
HexDigit
: 'A'..'F'
;
fragment
Digit
: '0'..'9'
;
fragment
Exponent
: ('E') ( PLUS|MINUS )? (Digit)+
;
fragment
RegexComponent
: 'A'..'Z' | '0'..'9' | '_'
| PLUS | STAR | QUESTION | MINUS | DOT
| LPAREN | RPAREN | LSQUARE | RSQUARE | LCURLY | RCURLY
| BITWISEXOR | BITWISEOR | DOLLAR | '!'
;
CharSetName
: '_' (Letter | Digit | '_' | '-' | '.' | ':')+
;
WHITE_SPACE
: (' '|'\r'|'\t'|'\n') -> channel(HIDDEN)
;
LINE_COMMENT
: '--' ~('\n' | '\r')* -> channel(HIDDEN)
;
QUERY_HINT
: SHOW_HINT
| HIDDEN_HINT
;
SHOW_HINT
: '/*+' (QUERY_HINT | .)*? '*/' ->channel(HIDDEN)
;
HIDDEN_HINT
: '/*' (QUERY_HINT | .)*? '*/' -> channel(HIDDEN)
;

View File

@ -0,0 +1,6 @@
# HiveSQL (V4)
## Sources
<https://github.com/antlr/grammars-v4/blob/master/sql/hive/v4/README.md>
<https://github.com/apache/hive/tree/master/parser/src/java/org/apache/hadoop/hive/ql/parse>

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -22,15 +22,21 @@ lexer grammar PlSqlLexer;
options {
superClass=PlSqlBaseLexer;
caseInsensitive = true;
}
@lexer::header {
import PlSqlBaseLexer from "./PlSqlBaseLexer";
}
ABORT: 'ABORT';
ABS: 'ABS';
ABSENT: 'ABSENT';
ACCESS: 'ACCESS';
ACCESSED: 'ACCESSED';
ACCOUNT: 'ACCOUNT';
ACL: 'ACL';
ACOS: 'ACOS';
ACROSS: 'ACROSS';
ACTION: 'ACTION';
ACTIONS: 'ACTIONS';
ACTIVATE: 'ACTIVATE';
@ -63,8 +69,11 @@ ALLOCATE: 'ALLOCATE';
ALLOW: 'ALLOW';
ALL_ROWS: 'ALL_ROWS';
ALTER: 'ALTER';
ALTERNATE: 'ALTERNATE';
ALWAYS: 'ALWAYS';
ANALYTIC: 'ANALYTIC';
ANALYZE: 'ANALYZE';
ANCESTOR: 'ANCESTOR';
ANCILLARY: 'ANCILLARY';
AND: 'AND';
AND_EQUAL: 'AND_EQUAL';
@ -116,7 +125,9 @@ AUTO_REOPTIMIZE: 'AUTO_REOPTIMIZE';
AVAILABILITY: 'AVAILABILITY';
AVRO: 'AVRO';
BACKGROUND: 'BACKGROUND';
BACKINGFILE: 'BACKINGFILE';
BACKUP: 'BACKUP';
BACKUPS: 'BACKUPS';
BACKUPSET: 'BACKUPSET';
BASIC: 'BASIC';
BASICFILE: 'BASICFILE';
@ -184,10 +195,12 @@ CALL: 'CALL';
CANCEL: 'CANCEL';
CANONICAL: 'CANONICAL';
CAPACITY: 'CAPACITY';
CAPTION: 'CAPTION';
CARDINALITY: 'CARDINALITY';
CASCADE: 'CASCADE';
CASE: 'CASE';
CAST: 'CAST';
CASESENSITIVE: 'CASE-SENSITIVE';
CATEGORY: 'CATEGORY';
CDBDEFAULT: 'CDB$DEFAULT';
CEIL: 'CEIL';
@ -210,7 +223,10 @@ CHOOSE: 'CHOOSE';
CHR: 'CHR';
CHUNK: 'CHUNK';
CLASS: 'CLASS';
CLASSIFICATION: 'CLASSIFICATION';
CLASSIFIER: 'CLASSIFIER';
CLAUSE: 'CLAUSE';
CLEAN: 'CLEAN';
CLEANUP: 'CLEANUP';
CLEAR: 'CLEAR';
C_LETTER: 'C';
@ -243,6 +259,7 @@ COLUMN_VALUE: 'COLUMN_VALUE';
COMMENT: 'COMMENT';
COMMIT: 'COMMIT';
COMMITTED: 'COMMITTED';
COMMON: 'COMMON';
COMMON_DATA: 'COMMON_DATA';
COMPACT: 'COMPACT';
COMPATIBILITY: 'COMPATIBILITY';
@ -284,8 +301,10 @@ CONSTRAINT: 'CONSTRAINT';
CONSTRAINTS: 'CONSTRAINTS';
CONSTRUCTOR: 'CONSTRUCTOR';
CONTAINER: 'CONTAINER';
CONTAINER_DATA: 'CONTAINER_DATA';
CONTAINERS: 'CONTAINERS';
CONTAINERS_DEFAULT: 'CONTAINERS_DEFAULT';
CONTAINER_DATA: 'CONTAINER_DATA';
CONTAINER_MAP: 'CONTAINER_MAP';
CONTENT: 'CONTENT';
CONTENTS: 'CONTENTS';
CONTEXT: 'CONTEXT';
@ -293,6 +312,7 @@ CONTINUE: 'CONTINUE';
CONTROLFILE: 'CONTROLFILE';
CON_UID_TO_ID: 'CON_UID_TO_ID';
CONVERT: 'CONVERT';
CONVERSION: 'CONVERSION';
COOKIE: 'COOKIE';
COPY: 'COPY';
CORR_K: 'CORR_K';
@ -376,6 +396,8 @@ DECRYPT: 'DECRYPT';
DEDUPLICATE: 'DEDUPLICATE';
DEFAULT: 'DEFAULT';
DEFAULTS: 'DEFAULTS';
DEFAULT_COLLATION: 'DEFAULT_COLLATION';
DEFAULT_CREDENTIAL: 'DEFAULT_CREDENTIAL';
DEFERRABLE: 'DEFERRABLE';
DEFERRED: 'DEFERRED';
DEFINED: 'DEFINED';
@ -395,6 +417,7 @@ DEQUEUE: 'DEQUEUE';
DEREF: 'DEREF';
DEREF_NO_REWRITE: 'DEREF_NO_REWRITE';
DESC: 'DESC';
DESCRIPTION: 'DESCRIPTION';
DESTROY: 'DESTROY';
DETACHED: 'DETACHED';
DETERMINES: 'DETERMINES';
@ -445,6 +468,7 @@ DV: 'DV';
DYNAMIC: 'DYNAMIC';
DYNAMIC_SAMPLING: 'DYNAMIC_SAMPLING';
DYNAMIC_SAMPLING_EST_CDN: 'DYNAMIC_SAMPLING_EST_CDN';
E_LETTER: 'E';
EACH: 'EACH';
EDITIONABLE: 'EDITIONABLE';
EDITION: 'EDITION';
@ -537,6 +561,7 @@ FAR: 'FAR';
FAST: 'FAST';
FASTSTART: 'FASTSTART';
FBTSCAN: 'FBTSCAN';
FEATURE: 'FEATURE';
FEATURE_DETAILS: 'FEATURE_DETAILS';
FEATURE_ID: 'FEATURE_ID';
FEATURE_SET: 'FEATURE_SET';
@ -544,6 +569,8 @@ FEATURE_VALUE: 'FEATURE_VALUE';
FETCH: 'FETCH';
FILE: 'FILE';
FILE_NAME_CONVERT: 'FILE_NAME_CONVERT';
FILEGROUP: 'FILEGROUP';
FILESTORE: 'FILESTORE';
FILESYSTEM_LIKE_LOGGING: 'FILESYSTEM_LIKE_LOGGING';
FILTER: 'FILTER';
FINAL: 'FINAL';
@ -559,6 +586,7 @@ FLASHBACK: 'FLASHBACK';
FLASH_CACHE: 'FLASH_CACHE';
FLOAT: 'FLOAT';
FLOB: 'FLOB';
FLEX: 'FLEX';
FLOOR: 'FLOOR';
FLUSH: 'FLUSH';
FOLDER: 'FOLDER';
@ -583,6 +611,8 @@ FULL: 'FULL';
FULL_OUTER_JOIN_TO_OUTER: 'FULL_OUTER_JOIN_TO_OUTER';
FUNCTION: 'FUNCTION';
FUNCTIONS: 'FUNCTIONS';
FTP: 'FTP';
G_LETTER: 'G';
GATHER_OPTIMIZER_STATISTICS: 'GATHER_OPTIMIZER_STATISTICS';
GATHER_PLAN_STATISTICS: 'GATHER_PLAN_STATISTICS';
GBY_CONC_ROLLUP: 'GBY_CONC_ROLLUP';
@ -604,6 +634,7 @@ GROUPS: 'GROUPS';
GUARANTEED: 'GUARANTEED';
GUARANTEE: 'GUARANTEE';
GUARD: 'GUARD';
HALF_YEARS: 'HALF_YEARS';
HASH_AJ: 'HASH_AJ';
HASH: 'HASH';
HASHKEYS: 'HASHKEYS';
@ -616,14 +647,20 @@ HEXTORAW: 'HEXTORAW';
HEXTOREF: 'HEXTOREF';
HIDDEN_KEYWORD: 'HIDDEN';
HIDE: 'HIDE';
HIER_ORDER: 'HIER_ORDER';
HIERARCHICAL: 'HIERARCHICAL';
HIERARCHIES: 'HIERARCHIES';
HIERARCHY: 'HIERARCHY';
HIGH: 'HIGH';
HINTSET_BEGIN: 'HINTSET_BEGIN';
HINTSET_END: 'HINTSET_END';
HOT: 'HOT';
HOUR: 'HOUR';
HOURS: 'HOURS';
HTTP: 'HTTP';
HWM_BROKERED: 'HWM_BROKERED';
HYBRID: 'HYBRID';
H_LETTER: 'H';
IDENTIFIED: 'IDENTIFIED';
IDENTIFIER: 'IDENTIFIER';
IDENTITY: 'IDENTITY';
@ -640,6 +677,7 @@ IMMEDIATE: 'IMMEDIATE';
IMPACT: 'IMPACT';
IMPORT: 'IMPORT';
INACTIVE: 'INACTIVE';
INACTIVE_ACCOUNT_TIME: 'INACTIVE_ACCOUNT_TIME';
INCLUDE: 'INCLUDE';
INCLUDE_VERSION: 'INCLUDE_VERSION';
INCLUDING: 'INCLUDING';
@ -719,6 +757,7 @@ INVALIDATE: 'INVALIDATE';
INVISIBLE: 'INVISIBLE';
IN_XQUERY: 'IN_XQUERY';
IS: 'IS';
IS_LEAF: 'IS_LEAF';
ISOLATION: 'ISOLATION';
ISOLATION_LEVEL: 'ISOLATION_LEVEL';
ITERATE: 'ITERATE';
@ -741,7 +780,9 @@ JSON_SERIALIZE: 'JSON_SERIALIZE';
JSON_TABLE: 'JSON_TABLE';
JSON_TEXTCONTAINS2: 'JSON_TEXTCONTAINS2';
JSON_TEXTCONTAINS: 'JSON_TEXTCONTAINS';
JSON_TRANSFORM: 'JSON_TRANSFORM';
JSON_VALUE: 'JSON_VALUE';
K_LETTER: 'K';
KEEP_DUPLICATES: 'KEEP_DUPLICATES';
KEEP: 'KEEP';
KERBEROS: 'KERBEROS';
@ -762,6 +803,9 @@ LAYER: 'LAYER';
LDAP_REGISTRATION_ENABLED: 'LDAP_REGISTRATION_ENABLED';
LDAP_REGISTRATION: 'LDAP_REGISTRATION';
LDAP_REG_SYNC_INTERVAL: 'LDAP_REG_SYNC_INTERVAL';
LEAF: 'LEAF';
LEAD_CDB: 'LEAD_CDB';
LEAD_CDB_URI: 'LEAD_CDB_URI';
LEADING: 'LEADING';
LEFT: 'LEFT';
LENGTH2: 'LENGTH2';
@ -771,6 +815,7 @@ LENGTHC: 'LENGTHC';
LENGTH: 'LENGTH';
LESS: 'LESS';
LEVEL: 'LEVEL';
LEVEL_NAME: 'LEVEL_NAME';
LEVELS: 'LEVELS';
LIBRARY: 'LIBRARY';
LIFECYCLE: 'LIFECYCLE';
@ -797,6 +842,7 @@ LOCALTIME: 'LOCALTIME';
LOCALTIMESTAMP: 'LOCALTIMESTAMP';
LOCATION: 'LOCATION';
LOCATOR: 'LOCATOR';
LOCKDOWN: 'LOCKDOWN';
LOCKED: 'LOCKED';
LOCKING: 'LOCKING';
LOCK: 'LOCK';
@ -813,16 +859,19 @@ LOGON: 'LOGON';
LOG_READ_ONLY_VIOLATIONS: 'LOG_READ_ONLY_VIOLATIONS';
LONG: 'LONG';
LOOP: 'LOOP';
LOST: 'LOST';
LOWER: 'LOWER';
LOW: 'LOW';
LPAD: 'LPAD';
LTRIM: 'LTRIM';
M_LETTER: 'M';
MAIN: 'MAIN';
MAKE_REF: 'MAKE_REF';
MANAGED: 'MANAGED';
MANAGE: 'MANAGE';
MANAGEMENT: 'MANAGEMENT';
MANAGER: 'MANAGER';
MANDATORY: 'MANDATORY';
MANUAL: 'MANUAL';
MAP: 'MAP';
MAPPING: 'MAPPING';
@ -850,6 +899,10 @@ MEASURE: 'MEASURE';
MEASURES: 'MEASURES';
MEDIUM: 'MEDIUM';
MEMBER: 'MEMBER';
MEMBER_CAPTION: 'MEMBER_CAPTION';
MEMBER_DESCRIPTION: 'MEMBER_DESCRIPTION';
MEMBER_NAME: 'MEMBER_NAME';
MEMBER_UNIQUE_NAME: 'MEMBER_UNIQUE_NAME';
MEMCOMPRESS: 'MEMCOMPRESS';
MEMORY: 'MEMORY';
MERGEACTIONS: 'MERGE$ACTIONS';
@ -868,10 +921,13 @@ MINING: 'MINING';
MINUS: 'MINUS';
MINUS_NULL: 'MINUS_NULL';
MINUTE: 'MINUTE';
MINUTES: 'MINUTES';
MINVALUE: 'MINVALUE';
MIRRORCOLD: 'MIRRORCOLD';
MIRRORHOT: 'MIRRORHOT';
MIRROR: 'MIRROR';
MISSING: 'MISSING';
MISMATCH: 'MISMATCH';
MLSLABEL: 'MLSLABEL';
MODEL_COMPILE_SUBQUERY: 'MODEL_COMPILE_SUBQUERY';
MODEL_DONTVERIFY_UNIQUENESS: 'MODEL_DONTVERIFY_UNIQUENESS';
@ -896,6 +952,7 @@ MONTHS_BETWEEN: 'MONTHS_BETWEEN';
MONTHS: 'MONTHS';
MOUNT: 'MOUNT';
MOUNTPATH: 'MOUNTPATH';
MOUNTPOINT: 'MOUNTPOINT';
MOVEMENT: 'MOVEMENT';
MOVE: 'MOVE';
MULTIDIMENSIONAL: 'MULTIDIMENSIONAL';
@ -1053,6 +1110,7 @@ NO_PLACE_DISTINCT: 'NO_PLACE_DISTINCT';
NO_PLACE_GROUP_BY: 'NO_PLACE_GROUP_BY';
NO_PQ_CONCURRENT_UNION: 'NO_PQ_CONCURRENT_UNION';
NO_PQ_MAP: 'NO_PQ_MAP';
NOPROMPT: 'NOPROMPT';
NO_PQ_REPLICATE: 'NO_PQ_REPLICATE';
NO_PQ_SKEW: 'NO_PQ_SKEW';
NO_PRUNE_GSETS: 'NO_PRUNE_GSETS';
@ -1214,6 +1272,7 @@ OVERRIDING: 'OVERRIDING';
OWNER: 'OWNER';
OWNERSHIP: 'OWNERSHIP';
OWN: 'OWN';
P_LETTER: 'P';
PACKAGE: 'PACKAGE';
PACKAGES: 'PACKAGES';
PARALLEL_ENABLE: 'PARALLEL_ENABLE';
@ -1223,6 +1282,8 @@ PARAMETERFILE: 'PARAMETERFILE';
PARAMETERS: 'PARAMETERS';
PARAM: 'PARAM';
PARENT: 'PARENT';
PARENT_LEVEL_NAME: 'PARENT_LEVEL_NAME';
PARENT_UNIQUE_NAME: 'PARENT_UNIQUE_NAME';
PARITY: 'PARITY';
PARTIAL_JOIN: 'PARTIAL_JOIN';
PARTIALLY: 'PARTIALLY';
@ -1241,6 +1302,7 @@ PASSWORD_LOCK_TIME: 'PASSWORD_LOCK_TIME';
PASSWORD: 'PASSWORD';
PASSWORD_REUSE_MAX: 'PASSWORD_REUSE_MAX';
PASSWORD_REUSE_TIME: 'PASSWORD_REUSE_TIME';
PASSWORD_ROLLOVER_TIME: 'PASSWORD_ROLLOVER_TIME';
PASSWORD_VERIFY_FUNCTION: 'PASSWORD_VERIFY_FUNCTION';
PAST: 'PAST';
PATCH: 'PATCH';
@ -1289,6 +1351,7 @@ PLSQL_DEBUG: 'PLSQL_DEBUG';
PLSQL_OPTIMIZE_LEVEL: 'PLSQL_OPTIMIZE_LEVEL';
PLSQL_WARNINGS: 'PLSQL_WARNINGS';
PLUGGABLE: 'PLUGGABLE';
PMEM: 'PMEM';
POINT: 'POINT';
POLICY: 'POLICY';
POOL_16K: 'POOL_16K';
@ -1343,8 +1406,10 @@ PROFILE: 'PROFILE';
PROGRAM: 'PROGRAM';
PROJECT: 'PROJECT';
PROPAGATE: 'PROPAGATE';
PROPERTY: 'PROPERTY';
PROTECTED: 'PROTECTED';
PROTECTION: 'PROTECTION';
PROTOCOL: 'PROTOCOL';
PROXY: 'PROXY';
PRUNING: 'PRUNING';
PUBLIC: 'PUBLIC';
@ -1356,6 +1421,7 @@ PX_FAULT_TOLERANCE: 'PX_FAULT_TOLERANCE';
PX_GRANULE: 'PX_GRANULE';
PX_JOIN_FILTER: 'PX_JOIN_FILTER';
QB_NAME: 'QB_NAME';
QUARTERS: 'QUARTERS';
QUERY_BLOCK: 'QUERY_BLOCK';
QUERY: 'QUERY';
QUEUE_CURR: 'QUEUE_CURR';
@ -1364,6 +1430,7 @@ QUEUE_ROWP: 'QUEUE_ROWP';
QUIESCE: 'QUIESCE';
QUORUM: 'QUORUM';
QUOTA: 'QUOTA';
QUOTAGROUP: 'QUOTAGROUP';
RAISE: 'RAISE';
RANDOM_LOCAL: 'RANDOM_LOCAL';
RANDOM: 'RANDOM';
@ -1425,6 +1492,7 @@ RELIES_ON: 'RELIES_ON';
RELOCATE: 'RELOCATE';
RELY: 'RELY';
REMAINDER: 'REMAINDER';
REMOTE: 'REMOTE';
REMOTE_MAPPED: 'REMOTE_MAPPED';
REMOVE: 'REMOVE';
RENAME: 'RENAME';
@ -1501,9 +1569,11 @@ SCRUB: 'SCRUB';
SD_ALL: 'SD_ALL';
SD_INHIBIT: 'SD_INHIBIT';
SDO_GEOM_MBR: 'SDO_GEOM_MBR';
SDO_GEOMETRY: 'SDO_GEOMETRY';
SD_SHOW: 'SD_SHOW';
SEARCH: 'SEARCH';
SECOND: 'SECOND';
SECONDS: 'SECONDS';
SECRET: 'SECRET';
SECUREFILE_DBA: 'SECUREFILE_DBA';
SECUREFILE: 'SECUREFILE';
@ -1527,6 +1597,7 @@ SERIALLY_REUSABLE: 'SERIALLY_REUSABLE';
SERIAL: 'SERIAL';
SERVERERROR: 'SERVERERROR';
SERVICE_NAME_CONVERT: 'SERVICE_NAME_CONVERT';
SERVICE: 'SERVICE';
SERVICES: 'SERVICES';
SESSION_CACHED_CURSORS: 'SESSION_CACHED_CURSORS';
SESSION: 'SESSION';
@ -1538,6 +1609,7 @@ SETS: 'SETS';
SETTINGS: 'SETTINGS';
SET_TO_JOIN: 'SET_TO_JOIN';
SEVERE: 'SEVERE';
SHARDSPACE: 'SHARDSPACE';
SHARED_POOL: 'SHARED_POOL';
SHARED: 'SHARED';
SHARE: 'SHARE';
@ -1548,6 +1620,7 @@ SHRINK: 'SHRINK';
SHUTDOWN: 'SHUTDOWN';
SIBLINGS: 'SIBLINGS';
SID: 'SID';
SITE: 'SITE';
SIGNAL_COMPONENT: 'SIGNAL_COMPONENT';
SIGNAL_FUNCTION: 'SIGNAL_FUNCTION';
SIGN: 'SIGN';
@ -1581,11 +1654,13 @@ SQLDATA: 'SQLDATA';
SQLERROR: 'SQLERROR';
SQLLDR: 'SQLLDR';
SQL: 'SQL';
SQL_MACRO: 'SQL_MACRO';
SQL_TRACE: 'SQL_TRACE';
SQL_TRANSLATION_PROFILE: 'SQL_TRANSLATION_PROFILE';
SQRT: 'SQRT';
STALE: 'STALE';
STANDALONE: 'STANDALONE';
STANDARD: 'STANDARD';
STANDARD_HASH: 'STANDARD_HASH';
STANDBY_MAX_DATA_DELAY: 'STANDBY_MAX_DATA_DELAY';
STANDBYS: 'STANDBYS';
@ -1651,6 +1726,7 @@ SWITCH: 'SWITCH';
SYNCHRONOUS: 'SYNCHRONOUS';
SYNC: 'SYNC';
SYNONYM: 'SYNONYM';
SYS: 'SYS';
SYSASM: 'SYSASM';
SYS_AUDIT: 'SYS_AUDIT';
SYSAUX: 'SYSAUX';
@ -1930,6 +2006,7 @@ SYS_XQXFORM: 'SYS_XQXFORM';
SYS_XSID_TO_RAW: 'SYS_XSID_TO_RAW';
SYS_ZMAP_FILTER: 'SYS_ZMAP_FILTER';
SYS_ZMAP_REFRESH: 'SYS_ZMAP_REFRESH';
T_LETTER: 'T';
TABLE_LOOKUP_BY_NL: 'TABLE_LOOKUP_BY_NL';
TABLESPACE_NO: 'TABLESPACE_NO';
TABLESPACE: 'TABLESPACE';
@ -1968,6 +2045,7 @@ TIMEZONE_MINUTE: 'TIMEZONE_MINUTE';
TIMEZONE_OFFSET: 'TIMEZONE_OFFSET';
TIMEZONE_REGION: 'TIMEZONE_REGION';
TIME_ZONE: 'TIME_ZONE';
TIMING: 'TIMING';
TIV_GB: 'TIV_GB';
TIV_SSF: 'TIV_SSF';
TO_ACLID: 'TO_ACLID';
@ -1994,6 +2072,7 @@ TRACING: 'TRACING';
TRACKING: 'TRACKING';
TRAILING: 'TRAILING';
TRANSACTION: 'TRANSACTION';
TRANSFORM: 'TRANSFORM';
TRANSFORM_DISTINCT_AGG: 'TRANSFORM_DISTINCT_AGG';
TRANSITIONAL: 'TRANSITIONAL';
TRANSITION: 'TRANSITION';
@ -2094,6 +2173,7 @@ UTF8: 'UTF8';
V1: 'V1';
V2: 'V2';
VALIDATE: 'VALIDATE';
VALIDATE_CONVERSION: 'VALIDATE_CONVERSION';
VALIDATION: 'VALIDATION';
VALID_TIME_END: 'VALID_TIME_END';
VALUES: 'VALUES';
@ -2204,6 +2284,28 @@ PREDICTION_DETAILS: 'PREDICTION_DETAILS';
PREDICTION_PROBABILITY: 'PREDICTION_PROBABILITY';
PREDICTION_SET: 'PREDICTION_SET';
BLOCKCHAIN: 'BLOCKCHAIN';
COLLATE: 'COLLATE';
COLLATION: 'COLLATION';
DEFINITION: 'DEFINITION';
DUPLICATED: 'DUPLICATED';
EXTENDED: 'EXTENDED';
HASHING: 'HASHING';
IDLE: 'IDLE';
IMMUTABLE: 'IMMUTABLE';
ORACLE_DATAPUMP: 'ORACLE_DATAPUMP';
ORACLE_HDFS: 'ORACLE_HDFS';
ORACLE_HIVE: 'ORACLE_HIVE';
ORACLE_LOADER: 'ORACLE_LOADER';
SHA2_512_Q: '"SHA2_512"';
SHARDED: 'SHARDED';
V1_Q: '"V1"';
ISOLATE: 'ISOLATE';
ROOT: 'ROOT';
UNITE: 'UNITE';
ALGORITHM: 'ALGORITHM';
CUME_DIST: 'CUME_DIST';
DENSE_RANK: 'DENSE_RANK';
LISTAGG: 'LISTAGG';
@ -2217,9 +2319,12 @@ CORR: 'CORR';
COVAR_: 'COVAR_';
DECODE: 'DECODE';
LAG: 'LAG';
LAG_DIFF: 'LAG_DIFF';
LAG_DIFF_PERCENT: 'LAG_DIFF_PERCENT';
LEAD: 'LEAD';
MAX: 'MAX';
MEDIAN: 'MEDIAN';
MEMOPTIMIZE: 'MEMOPTIMIZE';
MIN: 'MIN';
NTILE: 'NTILE';
NVL: 'NVL';
@ -2251,7 +2356,7 @@ BIT_STRING_LIT: 'B' ('\'' [01]* '\'')+;
// Rule #284 <HEX_STRING_LIT> - subtoken typecast in <REGULAR_ID>
// Lowercase 'x' is a usual addition to the standard
HEX_STRING_LIT: 'X' ('\'' [A-F0-9]* '\'')+;
HEX_STRING_LIT: 'X' ('\'' [A-Fa-f0-9]* '\'')+;
DOUBLE_PERIOD: '..';
PERIOD: '.';
@ -2274,6 +2379,7 @@ PERIOD: '.';
UNSIGNED_INTEGER: [0-9]+;
APPROXIMATE_NUM_LIT: FLOAT_FRAGMENT ('E' ('+'|'-')? (FLOAT_FRAGMENT | [0-9]+))? ('D' | 'F')?;
// Rule #--- <CHAR_STRING> is a base for Rule #065 <char_string_lit> , it incorporates <character_representation>
// and a superfluous subtoken typecasting of the "QUOTE"
CHAR_STRING: '\'' (~('\'' | '\r' | '\n') | '\'' '\'' | NEWLINE)* '\'';
@ -2304,6 +2410,9 @@ COMMA: ',';
SOLIDUS: '/';
AT_SIGN: '@';
ASSIGN_OP: ':=';
HASH_OP: '#';
SQ: '\'';
BINDVAR
: ':' SIMPLE_LETTER (SIMPLE_LETTER | [0-9] | '_')*
@ -2359,7 +2468,7 @@ SPACES: [ \t\r\n]+ -> channel(HIDDEN);
fragment NEWLINE_EOF : NEWLINE | EOF;
fragment QUESTION_MARK : '?';
fragment SIMPLE_LETTER : [A-Z];
fragment SIMPLE_LETTER : [a-zA-Z];
fragment FLOAT_FRAGMENT : UNSIGNED_INTEGER* '.'? UNSIGNED_INTEGER+;
fragment NEWLINE : '\r'? '\n';
fragment SPACE : [ \t];

View File

@ -24,11 +24,14 @@ options {
tokenVocab=PlSqlLexer;
superClass=PlSqlBaseParser;
}
@parser::header {
import PlSqlBaseParser from './PlSqlBaseParser';
}
program: sql_script EOF;
program: sql_script SEMICOLON? EOF;
sql_script
: ((unit_statement | sql_plus_command) SEMICOLON?)* EOF
: (unit_statement | sql_plus_command)*
;
unit_statement
@ -268,21 +271,6 @@ trigger_body
| trigger_block
;
routine_clause
: routine_name function_argument?
;
compound_trigger_block
: COMPOUND TRIGGER seq_of_declare_specs? timing_point_section+ END trigger_name
;
timing_point_section
: bk=BEFORE STATEMENT IS trigger_block BEFORE STATEMENT ';'
| bk=BEFORE EACH ROW IS trigger_block BEFORE EACH ROW ';'
| ak=AFTER STATEMENT IS trigger_block AFTER STATEMENT ';'
| ak=AFTER EACH ROW IS trigger_block AFTER EACH ROW ';'
;
non_dml_event
: ALTER
| ANALYZE
@ -366,14 +354,6 @@ alter_method_element
: (ADD | DROP) (map_order_function_spec | subprogram_spec)
;
alter_attribute_definition
: (ADD | MODIFY | DROP) ATTRIBUTE (attribute_definition | '(' attribute_definition (',' attribute_definition)* ')')
;
attribute_definition
: attribute_name type_spec?
;
alter_collection_clauses
: MODIFY (LIMIT expression | ELEMENT TYPE type_spec)
;
@ -1621,27 +1601,6 @@ mv_log_augmentation
new_values_clause?
;
// Should bound this to just date/time expr
datetime_expr
: expression
;
// Should bound this to just interval expr
interval_expr
: expression
;
synchronous_or_asynchronous
: SYNCHRONOUS
| ASYNCHRONOUS
;
including_or_excluding
: INCLUDING
| EXCLUDING
;
create_materialized_view_log
: CREATE MATERIALIZED VIEW LOG ON tableview_name
( ( physical_attributes_clause
@ -2005,12 +1964,6 @@ lob_partitioning_storage
)
;
datatype_null_enable
: column_name datatype
SORT? (DEFAULT expression)? (ENCRYPT ( USING CHAR_STRING )? (IDENTIFIED BY REGULAR_ID)? CHAR_STRING? ( NO? SALT )? )?
(NOT NULL_)? (ENABLE | DISABLE)?
;
//Technically, this should only allow 'K' | 'M' | 'G' | 'T' | 'P' | 'E'
// but having issues with examples/numbers01.sql line 11 "sysdate -1m"
size_clause
@ -2286,8 +2239,7 @@ database_file_clauses
create_datafile_clause
: CREATE DATAFILE (filename | filenumber) (',' (filename | filenumber) )*
(AS (//TODO (','? file_specification)+ |
NEW) )?
(AS (NEW) )?
;
alter_datafile_clause
@ -2655,11 +2607,6 @@ add_column_clause
')'
| ( column_definition | virtual_column_definition ))
column_properties?
//TODO (','? out_of_line_part_storage )
;
alter_varray_col_properties
: MODIFY VARRAY varray_item '(' modify_lob_parameters ')'
;
varray_col_properties
@ -2760,19 +2707,6 @@ column_properties
| xmltype_column_properties
;
period_definition
: {this.isVersion12()}? PERIOD FOR column_name
( '(' start_time_column ',' end_time_column ')' )?
;
start_time_column
: column_name
;
end_time_column
: column_name
;
column_definition
: column_name (datatype | type_name)
SORT? (DEFAULT expression)? (ENCRYPT (USING CHAR_STRING)? (IDENTIFIED BY regular_id)? CHAR_STRING? (NO? SALT)? )? (inline_constraint* | inline_ref_constraint)
@ -2787,10 +2721,6 @@ autogenerated_sequence_definition
: GENERATED (ALWAYS | BY DEFAULT (ON NULL_)?)? AS IDENTITY
;
out_of_line_part_storage
: PARTITION partition_name
;
nested_table_col_properties
: NESTED TABLE (nested_item | COLUMN_VALUE) substitutable_column_clause? (LOCAL | GLOBAL)?
STORE AS tableview_name ( '(' ( '(' object_properties ')'
@ -2819,10 +2749,6 @@ supplemental_logging_props
: SUPPLEMENTAL LOG (supplemental_log_grp_clause | supplemental_id_key_clause)
;
column_or_attribute
: regular_id
;
object_type_col_properties
: COLUMN column=regular_id substitutable_column_clause
;
@ -2852,33 +2778,11 @@ drop_primary_key_or_unique_or_generic_clause
| CONSTRAINT constraint_name CASCADE?
;
add_constraint
: ADD (CONSTRAINT constraint_name)? add_constraint_clause (',' (CONSTRAINT constraint_name)? add_constraint_clause)+
;
add_constraint_clause
: primary_key_clause
| foreign_key_clause
| unique_key_clause
| check_constraint
;
check_constraint
: CHECK '(' condition ')' DISABLE?
;
drop_constraint
: DROP CONSTRAINT constraint_name
;
enable_constraint
: ENABLE CONSTRAINT constraint_name
;
disable_constraint
: DISABLE CONSTRAINT constraint_name
;
foreign_key_clause
: FOREIGN KEY paren_column_list references_clause on_delete_clause?
;
@ -2891,14 +2795,6 @@ on_delete_clause
: ON DELETE (CASCADE | SET NULL_)
;
unique_key_clause
: UNIQUE paren_column_list using_index_clause?
;
primary_key_clause
: PRIMARY KEY paren_column_list using_index_clause?
;
// Anonymous PL/SQL code block
anonymous_block
@ -3053,10 +2949,6 @@ statement
| procedure_call
;
swallow_to_semi
: ~';'+
;
assignment_statement
: (general_element | bind_variable) ASSIGN_OP expression
;
@ -3322,7 +3214,7 @@ subquery_operation_part
query_block
: SELECT (DISTINCT | UNIQUE | ALL)? selected_list
into_clause? from_clause where_clause? hierarchical_query_clause? group_by_clause? model_clause? order_by_clause?
into_clause? from_clause? where_clause? hierarchical_query_clause? group_by_clause? model_clause? order_by_clause?
;
selected_list
@ -4146,10 +4038,6 @@ rollback_segment_name
: identifier
;
table_var_name
: identifier
;
schema_name
: identifier
;
@ -6741,19 +6629,3 @@ non_reserved_keywords_pre12c
| YES
| ZONE
;
string_function_name
: CHR
| DECODE
| SUBSTR
| TO_CHAR
| TRIM
;
numeric_function_name
: AVG
| COUNT
| NVL
| ROUND
| SUM
;

View File

@ -490,12 +490,12 @@ windowFrame: (
)?;
frameExtent:
frameType = RANGE start = frameBound
| frameType = ROWS start = frameBound
| frameType = GROUPS start = frameBound
| frameType = RANGE BETWEEN start = frameBound AND end = frameBound
| frameType = ROWS BETWEEN start = frameBound AND end = frameBound
| frameType = GROUPS BETWEEN start = frameBound AND end = frameBound;
frameType = RANGE frameStart = frameBound
| frameType = ROWS frameStart = frameBound
| frameType = GROUPS frameStart = frameBound
| frameType = RANGE BETWEEN frameStart = frameBound AND end = frameBound
| frameType = ROWS BETWEEN frameStart = frameBound AND end = frameBound
| frameType = GROUPS BETWEEN frameStart = frameBound AND end = frameBound;
frameBound:
UNBOUNDED boundType = PRECEDING # unboundedFrame

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@ -548,11 +548,9 @@ EscapeStringConstant=547
UnterminatedEscapeStringConstant=548
InvalidEscapeStringConstant=549
InvalidUnterminatedEscapeStringConstant=550
AfterEscapeStringConstantMode_NotContinued=551
AfterEscapeStringConstantWithNewlineMode_NotContinued=552
DollarText=553
EndDollarStringConstant=554
AfterEscapeStringConstantWithNewlineMode_Continued=555
DollarText=551
EndDollarStringConstant=552
AfterEscapeStringConstantWithNewlineMode_Continued=553
'$'=1
'('=2
')'=3
@ -1063,4 +1061,4 @@ AfterEscapeStringConstantWithNewlineMode_Continued=555
'LOOP'=510
'OPEN'=511
'\\\\'=545
'\''=555
'\''=553

File diff suppressed because it is too large Load Diff

View File

@ -1,13 +1,60 @@
import { Lexer } from "antlr4ts/Lexer";
import { Lexer } from 'antlr4';
function isLetter(str) {
return str.length === 1 && str.match(/[a-z]/i);
}
export default class PostgreSQLLexerBase extends Lexer {
export default abstract class PostgreSQLLexerBase extends Lexer {
tags: string[] = [];
_interp: any;
constructor(input) {
super(input);
}
pushTag() {
this.tags.push(this.text);
}
isTag() {
return this.text === this.tags[this.tags.length - 1];
}
popTag() {
this.tags.pop();
}
getInputStream() {
return this._input;
}
checkLA( c) {
// eslint-disable-next-line new-cap
return this.getInputStream().LA(1) !== c;
}
charIsLetter() {
// eslint-disable-next-line new-cap
return isLetter(this.getInputStream().LA(-1));
}
HandleNumericFail() {
this.getInputStream().seek(this.getInputStream().index - 2);
const Integral = 535;
this.type = Integral;
}
HandleLessLessGreaterGreater() {
const LESS_LESS = 18;
const GREATER_GREATER = 19;
if (this.text === '<<') this.type = LESS_LESS;
if (this.text === '>>') this.type = GREATER_GREATER;
}
UnterminatedBlockCommentDebugAssert() {
// Debug.Assert(InputStream.LA(1) == -1 /*EOF*/);
}
CheckIfUtf32Letter() {
// eslint-disable-next-line new-cap
@ -21,52 +68,4 @@ export default class PostgreSQLLexerBase extends Lexer {
}
return isLetter(c[0]);
}
UnterminatedBlockCommentDebugAssert() {
// Debug.Assert(InputStream.LA(1) == -1 /*EOF*/);
}
HandleLessLessGreaterGreater() {
const LESS_LESS = 18;
const GREATER_GREATER = 19;
if (this.text === '<<') {
this._type = LESS_LESS;
}
if (this.text === '>>') {
this._type = GREATER_GREATER;
}
}
HandleNumericFail() {
this.getInputStream().seek(this.getInputStream().index - 2);
const Integral = 535;
this._type = Integral;
}
charIsLetter() {
// eslint-disable-next-line new-cap
return isLetter(this.getInputStream().LA(-1));
}
pushTag() {
this.tags.push(this.text);
};
isTag() {
return this.text === this.tags.pop();
}
popTag() {
this.tags.pop();
}
getInputStream() {
return this._input;
}
checkLA(c) {
// eslint-disable-next-line new-cap
return this.getInputStream().LA(1) !== c;
}
}

File diff suppressed because one or more lines are too long

View File

@ -548,11 +548,9 @@ EscapeStringConstant=547
UnterminatedEscapeStringConstant=548
InvalidEscapeStringConstant=549
InvalidUnterminatedEscapeStringConstant=550
AfterEscapeStringConstantMode_NotContinued=551
AfterEscapeStringConstantWithNewlineMode_NotContinued=552
DollarText=553
EndDollarStringConstant=554
AfterEscapeStringConstantWithNewlineMode_Continued=555
DollarText=551
EndDollarStringConstant=552
AfterEscapeStringConstantWithNewlineMode_Continued=553
'$'=1
'('=2
')'=3
@ -1063,4 +1061,4 @@ AfterEscapeStringConstantWithNewlineMode_Continued=555
'LOOP'=510
'OPEN'=511
'\\\\'=545
'\''=555
'\''=553

File diff suppressed because one or more lines are too long

View File

@ -1,24 +1,20 @@
/* eslint-disable new-cap,camelcase */
import { Parser, CharStreams, CommonTokenStream } from 'antlr4';
import PostgreSQLLexer from './PostgreSQLLexer';
import PostgreSQLParser from './PostgreSQLParser';
import { CharStreams, CommonTokenStream, Parser } from 'antlr4ts';
import { PostgreSQLLexer } from './PostgreSQLLexer';
import { PostgreSQLParser } from './PostgreSQLParser';
export default class PostgreSQLParserBase extends Parser {
getPostgreSQLParser(script) {
const charStream = CharStreams.fromString(script);
const lexer = new PostgreSQLLexer(charStream);
const tokens = new CommonTokenStream(lexer);
const parser = new PostgreSQLParser(tokens);
return parser;
// @ts-ignore
export default abstract class PostgreSQLParserBase extends Parser {
constructor( input) {
super(input);
}
GetParsedSqlTree(script, line) {
GetParsedSqlTree( script, line) {
const ph = this.getPostgreSQLParser(script);
return ph.program();
}
ParseRoutineBody(_localctx) {
ParseRoutineBody( _localctx) {
let lang = null;
for (let _i = 0, _a = _localctx.createfunc_opt_item(); _i < _a.length; _i++) {
const coi = _a[_i];
@ -35,13 +31,10 @@ export default class PostgreSQLParserBase extends Parser {
}
}
}
if (!lang) {
return;
}
if (!lang) return;
// eslint-disable-next-line camelcase
let func_as = null;
for (let _b = 0, _c = _localctx.createfunc_opt_item(); _b < _c.length; _b++) {
const a = _c[_b];
for (const a of _localctx.createfunc_opt_item()) {
if (!a.func_as()) {
// eslint-disable-next-line camelcase
func_as = a;
@ -49,9 +42,8 @@ export default class PostgreSQLParserBase extends Parser {
}
}
// eslint-disable-next-line camelcase
if (!!func_as) {
if (!func_as) {
const txt = this.GetRoutineBodyString(func_as.func_as().sconst(0));
// @ts-ignore
const line = func_as.func_as().sconst(0).start.getLine();
const ph = this.getPostgreSQLParser(txt);
switch (lang) {
@ -65,46 +57,47 @@ export default class PostgreSQLParserBase extends Parser {
}
}
TrimQuotes(s: string) {
return (!s) ? s : s.substring(1, s.length - 1);
TrimQuotes( s) {
return (!s) ? s : s.substring(1, s.length() - 1);
}
unquote(s: string) {
const slength = s.length;
unquote( s) {
const slength = s.length();
let r = '';
let i = 0;
while (i < slength) {
const c = s.charAt(i);
r = r.concat(c);
if (c === '\'' && i < slength - 1 && (s.charAt(i + 1) === '\'')) {
i++;
}
if (c === '\'' && i < slength - 1 && (s.charAt(i + 1) === '\'')) i++;
i++;
}
return r.toString();
};
}
GetRoutineBodyString(rule) {
GetRoutineBodyString( rule) {
const anysconst = rule.anysconst();
// eslint-disable-next-line new-cap
const StringConstant = anysconst.StringConstant();
if (!!StringConstant) {
return this.unquote(this.TrimQuotes(StringConstant.getText()));
}
if (null !== StringConstant) return this.unquote(this.TrimQuotes(StringConstant.getText()));
const UnicodeEscapeStringConstant = anysconst.UnicodeEscapeStringConstant();
if (!!UnicodeEscapeStringConstant) {
return this.TrimQuotes(UnicodeEscapeStringConstant.getText());
}
if (null !== UnicodeEscapeStringConstant) return this.TrimQuotes(UnicodeEscapeStringConstant.getText());
const EscapeStringConstant = anysconst.EscapeStringConstant();
if (!!EscapeStringConstant) {
return this.TrimQuotes(EscapeStringConstant.getText());
}
if (null !== EscapeStringConstant) return this.TrimQuotes(EscapeStringConstant.getText());
let result = '';
const dollartext = anysconst.DollarText();
for (let _i = 0, dollartext_1 = dollartext; _i < dollartext_1.length; _i++) {
const s = dollartext_1[_i];
for (const s of dollartext) {
result += s.getText();
}
return result;
}
getPostgreSQLParser( script) {
const charStream = CharStreams.fromString(script);
const lexer = new PostgreSQLLexer(charStream);
const tokens = new CommonTokenStream(lexer);
const parser = new PostgreSQLParser(tokens);
// lexer.removeErrorListeners();
// parser.removeErrorListeners();
return parser;
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +1,11 @@
import { Lexer } from "antlr4";
import { Lexer } from "antlr4ts/Lexer";
export default class PlSqlBaseLexer extends Lexer {
export default abstract class PlSqlBaseLexer extends Lexer {
_interp: any;
IsNewlineAtPos(pos: number): boolean {
const la = this._input.LA(pos);
return la == -1;
return la == -1 || String.fromCharCode(la) == '\n';
}
}

View File

@ -1,20 +1,30 @@
import { Parser } from 'antlr4';
import { Parser } from "antlr4ts/Parser";
import { TokenStream } from "antlr4ts/TokenStream";
export default class PlSqlBaseParser extends Parser {
export default abstract class PlSqlBaseParser extends Parser {
private _isVersion10: boolean = false;
private _isVersion12: boolean = true;
public isVersion10(): boolean {
constructor(input: TokenStream) {
super(input);
this._isVersion10 = false;
this._isVersion12 = true;
}
isVersion10(): boolean {
return this._isVersion10;
}
public isVersion12(): boolean {
isVersion12(): boolean {
return this._isVersion12;
}
public setVersion10(value: boolean): void {
setVersion10(value: boolean): void {
this._isVersion10 = value;
}
public setVersion12(value: boolean): void {
setVersion12(value: boolean): void {
this._isVersion12 = value;
}
}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +1,13 @@
import { ParseTreeWalker, CommonTokenStream } from 'antlr4';
import type { Parser } from 'antlr4/src/antlr4';
import { Parser } from 'antlr4ts';
import { ParseTreeWalker } from 'antlr4ts/tree';
import ParserErrorListener, {
ParserError,
ErrorHandler,
ParserErrorCollector,
} from './parserErrorListener';
interface IParser {
interface IParser extends Parser {
// Lost in type definition
ruleNames: string[];
// Customized in our parser
@ -17,7 +18,7 @@ interface IParser {
* Custom Parser class, subclass needs extends it.
*/
export default abstract class BasicParser {
private _parser: IParser & Parser;
private _parser: IParser;
public parse(
input: string,
@ -66,16 +67,14 @@ export default abstract class BasicParser {
*/
public getAllTokens(input: string): string[] {
const lexer = this.createLexer(input);
const tokensStream = new CommonTokenStream(lexer);
tokensStream.fill();
return tokensStream.tokens;
return lexer.getAllTokens().map(token => token.text);
};
/**
* Get Parser instance by input string
* @param input
*/
public createParser(input: string): IParser & Parser {
public createParser(input: string): IParser {
const lexer = this.createLexer(input);
const parser: any = this.createParserFromLexer(lexer);
parser.buildParseTrees = true;

View File

@ -1,4 +1,4 @@
import { Token, Recognizer, ErrorListener, RecognitionException } from 'antlr4';
import { Token, Recognizer, ParserErrorListener, RecognitionException } from 'antlr4ts';
export interface ParserError {
startLine: number;
endLine: number;
@ -8,7 +8,7 @@ export interface ParserError {
}
export interface SyntaxError<T> {
recognizer: Recognizer<T>;
recognizer: Recognizer<T, any>;
offendingSymbol: Token;
line: number;
charPositionInLine: number;
@ -22,16 +22,15 @@ type ErrorOffendingSymbol = {
export type ErrorHandler<T> = (err: ParserError, errOption: SyntaxError<T>) => void;
export class ParserErrorCollector extends ErrorListener<ErrorOffendingSymbol> {
export class ParserErrorCollector implements ParserErrorListener {
private _errors: ParserError[];
constructor(error: ParserError[]) {
super();
this._errors = error;
}
syntaxError(
recognizer: Recognizer<ErrorOffendingSymbol>, offendingSymbol: ErrorOffendingSymbol, line: number,
recognizer: Recognizer<ErrorOffendingSymbol, any>, offendingSymbol: ErrorOffendingSymbol, line: number,
charPositionInLine: number, msg: string, e: RecognitionException,
) {
let endCol = charPositionInLine + 1;
@ -49,16 +48,15 @@ export class ParserErrorCollector extends ErrorListener<ErrorOffendingSymbol> {
}
export default class ParserErrorListener extends ErrorListener<ErrorOffendingSymbol> {
export default class CustomParserErrorListener implements ParserErrorListener {
private _errorHandler;
constructor(errorListener: ErrorHandler<ErrorOffendingSymbol>) {
super();
this._errorHandler = errorListener;
}
syntaxError(
recognizer: Recognizer<ErrorOffendingSymbol>, offendingSymbol: ErrorOffendingSymbol, line: number,
recognizer: Recognizer<ErrorOffendingSymbol, any>, offendingSymbol: ErrorOffendingSymbol, line: number,
charPositionInLine: number, msg: string, e: any,
) {
let endCol = charPositionInLine + 1;

View File

@ -1,14 +1,14 @@
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import FlinkSqlLexer from '../lib/flinksql/FlinkSqlLexer';
import FlinkSqlParser from '../lib/flinksql/FlinkSqlParser';
import { CharStreams, CommonTokenStream } from 'antlr4ts';
import { FlinkSqlLexer } from '../lib/flinksql/FlinkSqlLexer';
import { FlinkSqlParser } from '../lib/flinksql/FlinkSqlParser';
import BasicParser from './common/basicParser';
export default class FlinkSQL extends BasicParser {
public createLexer(input: string): FlinkSqlLexer {
const chars = new CharStream(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const lexer = new FlinkSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): FlinkSqlParser {
public createParserFromLexer(lexer: FlinkSqlLexer): FlinkSqlParser {
const tokens = new CommonTokenStream(lexer);
const parser = new FlinkSqlParser(tokens);
return parser;

View File

@ -1,15 +1,15 @@
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import SqlLexer from '../lib/generic/SqlLexer';
import SqlParser from '../lib/generic/SqlParser';
import { CharStreams, CommonTokenStream } from 'antlr4ts';
import { SqlLexer } from '../lib/generic/SqlLexer';
import { SqlParser } from '../lib/generic/SqlParser';
import BasicParser from './common/basicParser';
export default class GenericSQL extends BasicParser {
public createLexer(input: string): SqlLexer {
const chars = new CharStream(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const lexer = new SqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): SqlParser {
public createParserFromLexer(lexer: SqlLexer): SqlParser {
const tokenStream = new CommonTokenStream(lexer);
return new SqlParser(tokenStream);
}

View File

@ -1,15 +1,15 @@
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import HiveSqlLexer from '../lib/hive/HiveSqlLexer';
import HiveSql from '../lib/hive/HiveSql';
import { CharStreams, CommonTokenStream } from 'antlr4ts';
import { HiveSqlLexer } from '../lib/hive/HiveSqlLexer';
import { HiveSql } from '../lib/hive/HiveSql';
import BasicParser from './common/basicParser';
export default class HiveSQL extends BasicParser {
public createLexer(input: string): HiveSqlLexer {
const chars = new CharStream(input);
const chars = CharStreams.fromString(input.toUpperCase());
const lexer = new HiveSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): HiveSql {
public createParserFromLexer(lexer: HiveSqlLexer): HiveSql {
const tokenStream = new CommonTokenStream(lexer);
return new HiveSql(tokenStream);
}

View File

@ -1,16 +1,16 @@
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import { CharStreams, CommonTokenStream, Lexer } from 'antlr4ts';
import BasicParser from './common/basicParser';
import PostgreSQLLexer from '../lib/pgsql/PostgreSQLLexer';
import PostgreSQLParser from '../lib/pgsql/PostgreSQLParser';
import { PostgreSQLLexer } from '../lib/pgsql/PostgreSQLLexer';
import { PostgreSQLParser } from '../lib/pgsql/PostgreSQLParser';
export default class PostgresSQL extends BasicParser {
public createLexer(input: string): PostgreSQLLexer {
const chars = new CharStream(input.toUpperCase());
const chars = CharStreams.fromString(input.toUpperCase());
const lexer = new PostgreSQLLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {
public createParserFromLexer(lexer: Lexer): PostgreSQLParser {
const tokenStream = new CommonTokenStream(lexer);
return new PostgreSQLParser(tokenStream);
}

View File

@ -1,16 +1,16 @@
import { CharStream, CommonTokenStream } from 'antlr4';
import { CharStreams, CommonTokenStream, Lexer } from 'antlr4ts';
import BasicParser from './common/basicParser';
import PlSqlLexer from '../lib/plsql/PlSqlLexer';
import PlSqlParser from '../lib/plsql/PlSqlParser';
import { PlSqlLexer } from '../lib/plsql/PlSqlLexer';
import { PlSqlParser } from '../lib/plsql/PlSqlParser';
export default class PLSQLParser extends BasicParser {
public createLexer(input: string): PlSqlLexer {
const chars = new CharStream(input.toUpperCase());
const chars = CharStreams.fromString(input.toUpperCase());
const lexer = new PlSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: PlSqlLexer): PlSqlParser {
public createParserFromLexer(lexer: Lexer): PlSqlParser {
const tokenStream = new CommonTokenStream(lexer);
return new PlSqlParser(tokenStream);
}

View File

@ -1,15 +1,15 @@
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import { CharStreams, CommonTokenStream } from 'antlr4ts';
import BasicParser from './common/basicParser';
import SparkSqlLexer from '../lib/spark/SparkSqlLexer';
import SparkSqlParser from '../lib/spark/SparkSqlParser';
import { SparkSqlLexer } from '../lib/spark/SparkSqlLexer';
import { SparkSqlParser } from '../lib/spark/SparkSqlParser';
export default class SparkSQL extends BasicParser {
public createLexer(input: string): Lexer {
const chars = new CharStream(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
public createLexer(input: string): SparkSqlLexer {
const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const lexer = new SparkSqlLexer(chars);
return lexer;
}
public createParserFromLexer(lexer: Lexer): any {
public createParserFromLexer(lexer: SparkSqlLexer): SparkSqlParser {
const tokenStream = new CommonTokenStream(lexer);
return new SparkSqlParser(tokenStream);
}

View File

@ -1,10 +1,10 @@
import { CharStream, CommonTokenStream, Lexer } from 'antlr4';
import TrinoSqlLexer from '../lib/trinosql/TrinoSqlLexer';
import TrinoSqlParser from '../lib/trinosql/TrinoSqlParser';
import { CharStreams, CommonTokenStream, Lexer } from 'antlr4ts';
import { TrinoSqlLexer } from '../lib/trinosql/TrinoSqlLexer';
import { TrinoSqlParser } from '../lib/trinosql/TrinoSqlParser';
import BasicParser from './common/basicParser';
export default class trinoSQL extends BasicParser {
public createLexer(input: string): TrinoSqlLexer {
const chars = new CharStream(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
const lexer = new TrinoSqlLexer(chars);
return lexer;
}

View File

@ -6,6 +6,6 @@ describe('FlinkSQL Lexer tests', () => {
const sql = 'SELECT * FROM table1';
const tokens = parser.getAllTokens(sql);
test('token counts', () => {
expect(tokens.length - 1).toBe(7);
expect(tokens.length).toBe(7);
});
});

View File

@ -1,5 +1,5 @@
import FlinkSQL from '../../../src/parser/flinksql';
import FlinkSqlParserListener from '../../../src/lib/flinksql/FlinkSqlParserListener';
import { FlinkSqlParserListener } from '../../../src/lib/flinksql/FlinkSqlParserListener';
import { TableExpressionContext } from '../../../src/lib/flinksql/FlinkSqlParser';
describe('Flink SQL Listener Tests', () => {
@ -11,14 +11,10 @@ describe('Flink SQL Listener Tests', () => {
test('Listener enterTableName', async () => {
let result = '';
class MyListener extends FlinkSqlParserListener {
constructor() {
super()
}
class MyListener implements FlinkSqlParserListener {
enterTableExpression = (ctx: TableExpressionContext): void => {
result = ctx.getText().toLowerCase();
result = ctx.text.toLowerCase();
}
}
const listenTableName = new MyListener();

View File

@ -23,7 +23,7 @@ describe('FlinkSQL Syntax Tests', () => {
expect(result.length).toBe(0);
});
test('Test simple Error Select Statement', () => {
const sql = `SELECTproduct, amount FROM;`;
const sql = `SELECT product, amount FROM;`;
const result = parser.validate(sql);
expect(result.length).toBe(1);
});

View File

@ -1,5 +1,6 @@
import FlinkSQL from '../../../src/parser/flinksql';
import FlinkSqlParserVisitor from '../../../src/lib/flinksql/FlinkSqlParserVisitor';
import { FlinkSqlParserVisitor } from '../../../src/lib/flinksql/FlinkSqlParserVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree';
describe('Flink SQL Visitor Tests', () => {
const expectTableName = 'user1';
@ -12,9 +13,12 @@ describe('Flink SQL Visitor Tests', () => {
test('Visitor visitTableName', () => {
let result = '';
class MyVisitor extends FlinkSqlParserVisitor<any>{
class MyVisitor extends AbstractParseTreeVisitor<any> implements FlinkSqlParserVisitor<any>{
protected defaultResult() {
return result;
}
visitTableExpression = (ctx): void => {
result = ctx.getText().toLowerCase();
result = ctx.text.toLowerCase();
}
}
const visitor: any = new MyVisitor();

View File

@ -7,6 +7,6 @@ describe('GenericSQL Lexer tests', () => {
const tokens = mysqlParser.getAllTokens(sql);
test('token counts', () => {
expect(tokens.length - 1).toBe(12);
expect(tokens.length).toBe(12);
});
});

View File

@ -1,5 +1,5 @@
import GenericSQL from '../../../src/parser/generic';
import SqlParserListener from '../../../src/lib/generic/SqlParserListener';
import { SqlParserListener } from '../../../src/lib/generic/SqlParserListener';
describe('Generic SQL Listener Tests', () => {
const expectTableName = 'user1';
@ -10,9 +10,9 @@ describe('Generic SQL Listener Tests', () => {
test('Listener enterTableName', async () => {
let result = '';
class MyListener extends SqlParserListener {
class MyListener implements SqlParserListener {
enterTableName = (ctx): void => {
result = ctx.getText().toLowerCase();
result = ctx.text.toLowerCase();
}
}
const listenTableName: any = new MyListener();

View File

@ -1,5 +1,6 @@
import GenericSQL from '../../../src/parser/generic';
import SqlParserVisitor from '../../../src/lib/generic/SqlParserVisitor';
import { SqlParserVisitor } from '../../../src/lib/generic/SqlParserVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree';
describe('Generic SQL Visitor Tests', () => {
const expectTableName = 'user1';
@ -12,14 +13,13 @@ describe('Generic SQL Visitor Tests', () => {
test('Visitor visitTableName', () => {
let result = '';
class MyVisitor extends SqlParserVisitor<any> {
constructor() {
super();
class MyVisitor extends AbstractParseTreeVisitor<any> implements SqlParserVisitor<any> {
protected defaultResult() {
return result;
}
visitTableName = (ctx): void => {
result = ctx.getText().toLowerCase();
super.visitTableName?.(ctx);
result = ctx.text.toLowerCase();
}
}
const visitor = new MyVisitor();

View File

@ -5,12 +5,12 @@ describe('HiveSQL Lexer tests', () => {
test('select token counts', () => {
const sql = 'SELECT * FROM t1';
const tokens = parser.getAllTokens(sql);
expect(tokens.length - 1).toBe(4);
expect(tokens.length).toBe(7);
});
test('select token counts', () => {
const sql = 'show create table_name;';
const tokens = parser.getAllTokens(sql);
expect(tokens.length - 1).toBe(4);
expect(tokens.length).toBe(6);
});
});

View File

@ -1,4 +1,4 @@
import HiveSqlListener from '../../../src/lib/hive/HiveSqlListener';
import { HiveSqlListener } from '../../../src/lib/hive/HiveSqlListener';
import HiveSQL from '../../../src/parser/hive';
@ -10,28 +10,28 @@ describe('Hive SQL Listener Tests', () => {
const parserTree = parser.parse(sql);
let result = '';
class MyListener extends HiveSqlListener {
enterSelect_list = (ctx): void => {
result = ctx.getText();
class MyListener implements HiveSqlListener {
enterSelectItem(ctx) {
result = ctx.text;
}
}
const listenTableName: any = new MyListener();
await parser.listen(listenTableName, parserTree);
expect(result).toBe(expectTableName);
expect(result).toBe(expectTableName.toUpperCase());
});
test('Listener enterCreateTable', async () => {
const sql = `drop table table_name;`;
const parserTree = parser.parse(sql);
let result = '';
class MyListener extends HiveSqlListener {
enterDrop_stmt = (ctx): void => {
result = ctx.getText();
class MyListener implements HiveSqlListener {
enterDropTableStatement(ctx) {
result = ctx.text;
}
}
const listenTableName: any = new MyListener();
await parser.listen(listenTableName, parserTree);
expect(result).toBe('droptabletable_name');
expect(result).toBe('DROPTABLETABLE_NAME');
});
});

View File

@ -13,10 +13,8 @@ describe('Hive SQL Syntax Tests', () => {
expect(result.length).toBe(0);
});
test('Wrong Select Statement', () => {
const sql = 'SELECT add ABC from Where ;';
const sql = 'SELECT add ABC FROM WHERE;';
const result = parser.validate(sql);
expect(result.length).toBe(2);
expect(result[0].message).toBe(`no viable alternative at input 'SELECTaddABCfromWhere'`);
expect(result[1].message).toBe(`mismatched input 'Where' expecting <EOF>`);
});
});

View File

@ -1,7 +1,8 @@
import HiveSqlVisitor from '../../../src/lib/hive/HiveSqlVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
import { HiveSqlVisitor } from '../../../src/lib/hive/HiveSqlVisitor';
import HiveSQL from '../../../src/parser/hive';
describe('Generic SQL Visitor Tests', () => {
describe('Hive SQL Visitor Tests', () => {
const expectTableName = 'dm_gis.dlv_addr_tc_count';
const sql = `select citycode,tc,inc_day from ${expectTableName} where inc_day='20190501' limit 100;`;
const parser = new HiveSQL();
@ -12,10 +13,14 @@ describe('Generic SQL Visitor Tests', () => {
test('Visitor visitTableName', () => {
let result = '';
class MyVisitor extends HiveSqlVisitor<any> {
visitTable_name = (ctx): void => {
result = ctx.getText().toLowerCase();
super.visitTable_name?.(ctx);
class MyVisitor extends AbstractParseTreeVisitor<any> implements HiveSqlVisitor<any> {
defaultResult() {
return result;
}
visitTableName(ctx) {
result = ctx.text.toLowerCase();
}
}

View File

@ -0,0 +1,244 @@
SELECT * FROM onek
WHERE onek.unique1 < 10
ORDER BY onek.unique1;
SELECT onek.unique1, onek.stringu1 FROM onek
WHERE onek.unique1 < 20
ORDER BY unique1 using >;
SELECT onek.unique1, onek.stringu1 FROM onek
WHERE onek.unique1 > 980
ORDER BY stringu1 using <;
SELECT onek.unique1, onek.string4 FROM onek
WHERE onek.unique1 > 980
ORDER BY string4 using <, unique1 using >;
SELECT onek.unique1, onek.string4 FROM onek
WHERE onek.unique1 > 980
ORDER BY string4 using >, unique1 using <;
--
-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
-- sort +0nr -1 +1d -2
--
SELECT onek.unique1, onek.string4 FROM onek
WHERE onek.unique1 < 20
ORDER BY unique1 using >, string4 using <;
--
-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
-- sort +0n -1 +1dr -2
--
SELECT onek.unique1, onek.string4 FROM onek
WHERE onek.unique1 < 20
ORDER BY unique1 using <, string4 using >;
--
-- test partial btree indexes
--
-- As of 7.2, planner probably won't pick an indexscan without stats,
-- so ANALYZE first. Also, we want to prevent it from picking a bitmapscan
-- followed by sort, because that could hide index ordering problems.
--
ANALYZE onek2;
SET enable_seqscan TO off;
SET enable_bitmapscan TO off;
SET enable_sort TO off;
--
-- awk '{if($1<10){print $0;}else{next;}}' onek.data | sort +0n -1
--
SELECT onek2.* FROM onek2 WHERE onek2.unique1 < 10;
--
-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
--
SELECT onek2.unique1, onek2.stringu1 FROM onek2
WHERE onek2.unique1 < 20
ORDER BY unique1 using >;
--
-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2
--
SELECT onek2.unique1, onek2.stringu1 FROM onek2
WHERE onek2.unique1 > 980;
RESET enable_seqscan;
RESET enable_bitmapscan;
RESET enable_sort;
SELECT two, stringu1, ten, string4
INTO TABLE tmp
FROM onek;
--
-- awk '{print $1,$2;}' person.data |
-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data |
-- awk 'BEGIN{FS=" ";}{if(NF!=2){print $4,$5;}else{print;}}' - stud_emp.data
--
-- SELECT name, age FROM person*; ??? check if different
SELECT p.name, p.age FROM person* p;
--
-- awk '{print $1,$2;}' person.data |
-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data |
-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $4,$5;}else{print;}}' - stud_emp.data |
-- sort +1nr -2
--
SELECT p.name, p.age FROM person* p ORDER BY age using >, name;
--
-- Test some cases involving whole-row Var referencing a subquery
--
select foo from (select 1 offset 0) as foo;
select foo from (select null offset 0) as foo;
select foo from (select 'xyzzy',1,null offset 0) as foo;
--
-- Test VALUES lists
--
select * from onek, (values(147, 'RFAAAA'), (931, 'VJAAAA')) as v (i, j)
WHERE onek.unique1 = v.i and onek.stringu1 = v.j;
-- a more complex case
-- looks like we're coding lisp :-)
select * from onek,
(values ((select i from
(values(10000), (2), (389), (1000), (2000), ((select 10029))) as foo(i)
order by i asc limit 1))) bar (i)
where onek.unique1 = bar.i;
-- try VALUES in a subquery
select * from onek
where (unique1,ten) in (values (1,1), (20,0), (99,9), (17,99))
order by unique1;
-- VALUES is also legal as a standalone query or a set-operation member
VALUES (1,2), (3,4+4), (7,77.7);
VALUES (1,2), (3,4+4), (7,77.7)
UNION ALL
SELECT 2+2, 57
UNION ALL
TABLE int8_tbl;
--
-- Test ORDER BY options
--
CREATE TEMP TABLE foo (f1 int);
INSERT INTO foo VALUES (42),(3),(10),(7),(null),(null),(1);
SELECT * FROM foo ORDER BY f1;
SELECT * FROM foo ORDER BY f1 ASC; -- same thing
SELECT * FROM foo ORDER BY f1 NULLS FIRST;
SELECT * FROM foo ORDER BY f1 DESC;
SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
-- check if indexscans do the right things
CREATE INDEX fooi ON foo (f1);
SET enable_sort = false;
SELECT * FROM foo ORDER BY f1;
SELECT * FROM foo ORDER BY f1 NULLS FIRST;
SELECT * FROM foo ORDER BY f1 DESC;
SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
DROP INDEX fooi;
CREATE INDEX fooi ON foo (f1 DESC);
SELECT * FROM foo ORDER BY f1;
SELECT * FROM foo ORDER BY f1 NULLS FIRST;
SELECT * FROM foo ORDER BY f1 DESC;
SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
DROP INDEX fooi;
CREATE INDEX fooi ON foo (f1 DESC NULLS LAST);
SELECT * FROM foo ORDER BY f1;
SELECT * FROM foo ORDER BY f1 NULLS FIRST;
SELECT * FROM foo ORDER BY f1 DESC;
SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
--
-- Test planning of some cases with partial indexes
--
-- partial index is usable
explain (costs off)
select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
-- actually run the query with an analyze to use the partial index
explain (costs off, analyze on, timing off, summary off)
select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
explain (costs off)
select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
-- partial index predicate implies clause, so no need for retest
explain (costs off)
select * from onek2 where unique2 = 11 and stringu1 < 'B';
select * from onek2 where unique2 = 11 and stringu1 < 'B';
explain (costs off)
select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
-- but if it's an update target, must retest anyway
explain (costs off)
select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update;
select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update;
-- partial index is not applicable
explain (costs off)
select unique2 from onek2 where unique2 = 11 and stringu1 < 'C';
select unique2 from onek2 where unique2 = 11 and stringu1 < 'C';
-- partial index implies clause, but bitmap scan must recheck predicate anyway
SET enable_indexscan TO off;
explain (costs off)
select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
RESET enable_indexscan;
-- check multi-index cases too
explain (costs off)
select unique1, unique2 from onek2
where (unique2 = 11 or unique1 = 0) and stringu1 < 'B';
select unique1, unique2 from onek2
where (unique2 = 11 or unique1 = 0) and stringu1 < 'B';
explain (costs off)
select unique1, unique2 from onek2
where (unique2 = 11 and stringu1 < 'B') or unique1 = 0;
select unique1, unique2 from onek2
where (unique2 = 11 and stringu1 < 'B') or unique1 = 0;
--
-- Test some corner cases that have been known to confuse the planner
--
-- ORDER BY on a constant doesn't really need any sorting
SELECT 1 AS x ORDER BY x;
-- But ORDER BY on a set-valued expression does
create function sillysrf(int) returns setof int as
'values (1),(10),(2),($1)' language sql immutable;
select sillysrf(42);
select sillysrf(-1) order by 1;
drop function sillysrf(int);
-- X = X isn't a no-op, it's effectively X IS NOT NULL assuming = is strict
-- (see bug #5084)
select * from (values (2),(null),(1)) v(k) where k = k order by k;
select * from (values (2),(null),(1)) v(k) where k = k;
-- Test partitioned tables with no partitions, which should be handled the
-- same as the non-inheritance case when expanding its RTE.
create table list_parted_tbl (a int,b int) partition by list (a);
create table list_parted_tbl1 partition of list_parted_tbl
for values in (1) partition by list(b);
explain (costs off) select * from list_parted_tbl;
drop table list_parted_tbl;

View File

@ -7,6 +7,6 @@ describe('PostgresSQL Lexer tests', () => {
const tokens = mysqlParser.getAllTokens(sql);
test('token counts', () => {
expect(tokens.length - 1).toBe(12);
expect(tokens.length).toBe(12);
});
});

View File

@ -1,4 +1,5 @@
import PostgreSQLParserListener from '../../../src/lib/pgsql/PostgreSQLParserListener';
import { Target_listContext } from '../../../src/lib/pgsql/PostgreSQLParser';
import { PostgreSQLParserListener } from '../../../src/lib/pgsql/PostgreSQLParserListener';
import PostgresSQL from '../../../src/parser/pgsql';
describe('PostgresSQL Listener Tests', () => {
@ -10,10 +11,9 @@ describe('PostgresSQL Listener Tests', () => {
test('Listener enterTableName', async () => {
let result = '';
class MyListener extends PostgreSQLParserListener {
// eslint-disable-next-line camelcase
enterTable_ref = (ctx): void => {
result = ctx.getText().toLowerCase();
class MyListener implements PostgreSQLParserListener {
enterTable_ref(ctx) {
result = ctx.text.toLowerCase();
}
}
const listenTableName: any = new MyListener();

View File

@ -0,0 +1,18 @@
import PostgresSQL from "../../../src/parser/pgsql";
import { readSQL } from "../../helper";
const parser = new PostgresSQL();
const features = {
base: readSQL(__dirname, "select.sql"),
};
describe("Postgre SQL Query Statement Tests", () => {
describe("Base Select", () => {
features.base.forEach((sql) => {
it(sql, () => {
expect(parser.validate(sql).length).toBe(0);
});
});
});
});

View File

@ -1,10 +1,10 @@
import PostgresSQL from "../../../src/parser/pgsql";
describe('Generic SQL Syntax Tests', () => {
describe('PostgresSQL SQL Syntax Tests', () => {
const parser = new PostgresSQL();
test('Select Statement', () => {
const sql = 'select id,name from user1;';
const sql = 'select id, t_name from user1;';
const result = parser.validate(sql);
expect(result.length).toBe(0);
@ -15,4 +15,11 @@ describe('Generic SQL Syntax Tests', () => {
const result = parser.validate(sql);
expect(result.length).toBe(0);
});
test('Select 1+1', () => {
const sql = 'SELECT 1+1;';
const result = parser.validate(sql);
expect(result.length).toBe(0);
});
});

View File

@ -1,4 +1,5 @@
import PostgreSQLParserVisitor from "../../../src/lib/pgsql/PostgreSQLParserVisitor";
import { AbstractParseTreeVisitor } from "antlr4ts/tree/AbstractParseTreeVisitor";
import { PostgreSQLParserVisitor } from "../../../src/lib/pgsql/PostgreSQLParserVisitor";
import PostgresSQL from "../../../src/parser/pgsql";
describe('Generic SQL Visitor Tests', () => {
@ -12,11 +13,13 @@ describe('Generic SQL Visitor Tests', () => {
test('Visitor visitTableName', () => {
let result = '';
class MyVisitor extends PostgreSQLParserVisitor<any> {
// eslint-disable-next-line camelcase
visitTable_ref = (ctx): void => {
result = ctx.getText().toLowerCase();
super.visitTable_ref?.(ctx);
class MyVisitor extends AbstractParseTreeVisitor<any> implements PostgreSQLParserVisitor<any> {
protected defaultResult() {
return result;
}
visitTable_ref(ctx) {
result = ctx.text.toLowerCase();
}
}
const visitor: any = new MyVisitor();

View File

@ -7,6 +7,6 @@ describe('PLSQL Lexer tests', () => {
const tokens = parser.getAllTokens(sql);
test('token counts', () => {
expect(tokens.length - 1).toBe(12);
expect(tokens.length).toBe(12);
});
});

View File

@ -1,4 +1,4 @@
import PlSqlParserListener from '../../../src/lib/plsql/PlSqlParserListener';
import { PlSqlParserListener } from '../../../src/lib/plsql/PlSqlParserListener';
import PLSQL from '../../../src/parser/plsql';
describe('PLSQL Listener Tests', () => {
@ -10,10 +10,10 @@ describe('PLSQL Listener Tests', () => {
test('Listener enterTableName', async () => {
let result = '';
class MyListener extends PlSqlParserListener {
class MyListener implements PlSqlParserListener {
// eslint-disable-next-line camelcase
enterTable_ref_list = (ctx): void => {
result = ctx.getText().toLowerCase();
result = ctx.text.toLowerCase();
}
}
const listenTableName: any = new MyListener();

View File

@ -1,4 +1,5 @@
import PlSqlParserVisitor from '../../../src/lib/plsql/PlSqlParserVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
import { PlSqlParserVisitor } from '../../../src/lib/plsql/PlSqlParserVisitor';
import PLSQL from '../../../src/parser/plsql';
describe('PLSQL Visitor Tests', () => {
@ -10,11 +11,13 @@ describe('PLSQL Visitor Tests', () => {
test('Visitor visitTable_ref_list', () => {
let result = '';
class MyVisitor extends PlSqlParserVisitor<any> {
class MyVisitor extends AbstractParseTreeVisitor<any> implements PlSqlParserVisitor<any> {
protected defaultResult() {
return result;
}
// eslint-disable-next-line camelcase
visitTable_ref_list = (ctx): void => {
result = ctx.getText().toLowerCase();
super.visitTable_ref_list?.(ctx);
result = ctx.text.toLowerCase();
}
}
const visitor: any = new MyVisitor();

View File

@ -6,12 +6,12 @@ describe('SparkSQL Lexer tests', () => {
test('select id,name from user1;', () => {
const sql = `select id,name from user1;`;
const tokens = parser.getAllTokens(sql);
expect(tokens.length - 1).toBe(10);
expect(tokens.length).toBe(10);
});
test('SELECT * FROM t WHERE x = 1 AND y = 2;', () => {
const sql = `SELECT * FROM t WHERE x = 1 AND y = 2;`;
const tokens = parser.getAllTokens(sql);
expect(tokens.length - 1).toBe(24);
expect(tokens.length).toBe(24);
});
});

View File

@ -1,4 +1,4 @@
import SparkSqlListener from '../../../src/lib/spark/SparkSqlListener';
import { SparkSqlListener } from '../../../src/lib/spark/SparkSqlListener';
import SparkSQL from '../../../src/parser/spark';
describe('Spark SQL Listener Tests', () => {
@ -10,9 +10,9 @@ describe('Spark SQL Listener Tests', () => {
test('Listener enterTableName', () => {
let result = '';
class MyListener extends SparkSqlListener {
class MyListener implements SparkSqlListener {
enterTableName = (ctx): void => {
result = ctx.getText().toLowerCase();
result = ctx.text.toLowerCase();
}
}
const listenTableName: any = new MyListener();

View File

@ -10,7 +10,7 @@ const validateTest = (sqls) => {
error(i, sql);
error(result);
}
expect(result.length).toBe(0);
expect(result.find(i => i.message)).toBeUndefined();
});
};

View File

@ -1,4 +1,5 @@
import SparkSqlVisitor from '../../../src/lib/spark/SparkSqlVisitor';
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor';
import { SparkSqlVisitor } from '../../../src/lib/spark/SparkSqlVisitor';
import SparkSQL from '../../../src/parser/spark';
describe('Spark SQL Visitor Tests', () => {
@ -12,10 +13,12 @@ describe('Spark SQL Visitor Tests', () => {
test('Visitor visitTableName', () => {
let result = '';
class MyVisitor extends SparkSqlVisitor<any> {
class MyVisitor extends AbstractParseTreeVisitor<any> implements SparkSqlVisitor<any> {
protected defaultResult() {
return result;
}
visitTableName = (ctx): void => {
result = ctx.getText().toLowerCase();
super.visitTableName?.(ctx);
result = ctx.text.toLowerCase();
}
}
const visitor: any = new MyVisitor();

View File

@ -7,6 +7,6 @@ describe('trinoSQL Lexer tests', () => {
const tokens = parser.getAllTokens(sql);
test('token counts', () => {
expect(tokens.length - 1).toBe(7);
expect(tokens.length).toBe(7);
});
});

Some files were not shown because too many files have changed in this diff Show More