feat: FlinkSQL supports auto complete (#115)

* feat: add antlr4-c3 dependencies

* feat: distinguish table, catalog and database from uid

* feat: move semicolon from sqlStatements to sqlStatement

* chore: move antlr4ts-cli to devDependencies

* feat: improve basic parser and support suggestions of token and syntax

* feat: implement suggest method in sql parsers

* test: flink sql suggestion test cases

* feat: optimize ts definition of suggestion

* feat: add split listener and optimize performance of auto-completion

* test: supplementary flink suggestion unit tests
Hayden 2023-06-09 11:22:53 +08:00 committed by GitHub
parent 2637f90295
commit 1b02ff5d75
25 changed files with 4521 additions and 3418 deletions
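The headline change: every SQL parser class now exposes getSuggestionAtCaretPosition, which returns keyword and name-path suggestions for a cursor position. A minimal sketch of the intended usage, based on the unit tests added in this commit (the root dt-sql-parser export is assumed; the tests themselves import src/parser/flinksql directly):

import { FlinkSQL } from 'dt-sql-parser'; // assumed root export

const parser = new FlinkSQL();
const sql = 'SELECT * FROM cat.db';
// caret at line 1, column 21, i.e. right behind "cat.db"
const suggestions = parser.getSuggestionAtCaretPosition(sql, { lineNumber: 1, column: 21 });
// suggestions?.syntax   -> e.g. [{ syntaxContextType: 'table', wordRanges: [/* tokens of cat . db */] }]
// suggestions?.keywords -> keyword (token) candidates at the caret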

package.json

@@ -34,6 +34,7 @@
     "@types/node": "^18.15.11",
     "@typescript-eslint/eslint-plugin": "^3.10.1",
     "@typescript-eslint/parser": "^3.10.1",
+    "antlr4ts-cli": "^0.5.0-alpha.4",
     "eslint": "^7.32.0",
     "eslint-config-google": "^0.14.0",
     "jest": "^29.5.0",
@@ -47,7 +48,7 @@
     "registry": "https://registry.npmjs.org/"
   },
   "dependencies": {
-    "antlr4ts": "^0.5.0-alpha.4",
-    "antlr4ts-cli": "^0.5.0-alpha.4"
+    "antlr4-c3": "^3.0.1",
+    "antlr4ts": "^0.5.0-alpha.4"
   }
 }

pnpm-lock.yaml

@@ -1,4 +1,4 @@
-lockfileVersion: 5.4
+lockfileVersion: 5.3

 specifiers:
   '@swc/core': ^1.3.60
@@ -7,6 +7,7 @@ specifiers:
   '@types/node': ^18.15.11
   '@typescript-eslint/eslint-plugin': ^3.10.1
   '@typescript-eslint/parser': ^3.10.1
+  antlr4-c3: ^3.0.1
   antlr4ts: ^0.5.0-alpha.4
   antlr4ts-cli: ^0.5.0-alpha.4
   eslint: ^7.32.0
@@ -17,16 +18,17 @@ specifiers:
   yargs-parser: ^21.1.1

 dependencies:
+  antlr4-c3: 3.0.1
   antlr4ts: 0.5.0-alpha.4
-  antlr4ts-cli: 0.5.0-alpha.4

 devDependencies:
   '@swc/core': 1.3.60
   '@swc/jest': 0.2.26_@swc+core@1.3.60
   '@types/jest': 29.5.1
   '@types/node': 18.16.16
-  '@typescript-eslint/eslint-plugin': 3.10.1_ughicqazzfm5kkseraofkm6oci
-  '@typescript-eslint/parser': 3.10.1_cgdknpc562nnyruteofhkegnom
+  '@typescript-eslint/eslint-plugin': 3.10.1_a18e814019c959d52a44881c5533ce12
+  '@typescript-eslint/parser': 3.10.1_eslint@7.32.0+typescript@5.0.4
+  antlr4ts-cli: 0.5.0-alpha.4
   eslint: 7.32.0
   eslint-config-google: 0.14.0_eslint@7.32.0
   jest: 29.5.0_@types+node@18.16.16
@@ -942,7 +944,7 @@ packages:
       '@types/yargs-parser': 21.0.0
     dev: true

-  /@typescript-eslint/eslint-plugin/3.10.1_ughicqazzfm5kkseraofkm6oci:
+  /@typescript-eslint/eslint-plugin/3.10.1_a18e814019c959d52a44881c5533ce12:
     resolution: {integrity: sha512-PQg0emRtzZFWq6PxBcdxRH3QIQiyFO3WCVpRL3fgj5oQS3CDs3AeAKfv4DxNhzn8ITdNJGJ4D3Qw8eAJf3lXeQ==}
     engines: {node: ^10.12.0 || >=12.0.0}
     peerDependencies:
@@ -953,8 +955,8 @@ packages:
       typescript:
         optional: true
     dependencies:
-      '@typescript-eslint/experimental-utils': 3.10.1_cgdknpc562nnyruteofhkegnom
-      '@typescript-eslint/parser': 3.10.1_cgdknpc562nnyruteofhkegnom
+      '@typescript-eslint/experimental-utils': 3.10.1_eslint@7.32.0+typescript@5.0.4
+      '@typescript-eslint/parser': 3.10.1_eslint@7.32.0+typescript@5.0.4
       debug: 4.3.4
       eslint: 7.32.0
       functional-red-black-tree: 1.0.1
@@ -966,7 +968,7 @@ packages:
       - supports-color
     dev: true

-  /@typescript-eslint/experimental-utils/3.10.1_cgdknpc562nnyruteofhkegnom:
+  /@typescript-eslint/experimental-utils/3.10.1_eslint@7.32.0+typescript@5.0.4:
     resolution: {integrity: sha512-DewqIgscDzmAfd5nOGe4zm6Bl7PKtMG2Ad0KG8CUZAHlXfAKTF9Ol5PXhiMh39yRL2ChRH1cuuUGOcVyyrhQIw==}
     engines: {node: ^10.12.0 || >=12.0.0}
     peerDependencies:
@@ -983,7 +985,7 @@ packages:
       - typescript
     dev: true

-  /@typescript-eslint/parser/3.10.1_cgdknpc562nnyruteofhkegnom:
+  /@typescript-eslint/parser/3.10.1_eslint@7.32.0+typescript@5.0.4:
     resolution: {integrity: sha512-Ug1RcWcrJP02hmtaXVS3axPPTTPnZjupqhgj+NnZ6BCkwSImWk/283347+x9wN+lqOdK9Eo3vsyiyDHgsmiEJw==}
     engines: {node: ^10.12.0 || >=12.0.0}
     peerDependencies:
@@ -994,7 +996,7 @@ packages:
       optional: true
     dependencies:
       '@types/eslint-visitor-keys': 1.0.0
-      '@typescript-eslint/experimental-utils': 3.10.1_cgdknpc562nnyruteofhkegnom
+      '@typescript-eslint/experimental-utils': 3.10.1_eslint@7.32.0+typescript@5.0.4
       '@typescript-eslint/types': 3.10.1
       '@typescript-eslint/typescript-estree': 3.10.1_typescript@5.0.4
       eslint: 7.32.0
@@ -1118,10 +1120,16 @@ packages:
     engines: {node: '>=10'}
     dev: true

+  /antlr4-c3/3.0.1:
+    resolution: {integrity: sha512-Vbizas0WK4Id6l1f48ANYHFZgfzmj82LZx4OuB/a87vCpyoUwofrlJ+sCiL1qT/D2SWf8HEcBPCsgskdx8NnzQ==}
+    dependencies:
+      antlr4ts: 0.5.0-alpha.4
+    dev: false
+
   /antlr4ts-cli/0.5.0-alpha.4:
     resolution: {integrity: sha512-lVPVBTA2CVHRYILSKilL6Jd4hAumhSZZWA7UbQNQrmaSSj7dPmmYaN4bOmZG79cOy0lS00i4LY68JZZjZMWVrw==}
     hasBin: true
-    dev: false
+    dev: true

   /antlr4ts/0.5.0-alpha.4:
     resolution: {integrity: sha512-WPQDt1B74OfPv/IMS2ekXAKkTZIHl88uMetg6q3OTqgFxZ/dxDXI0EWLyZid/1Pe6hTftyg5N7gel5wNAGxXyQ==}
@@ -1535,8 +1543,8 @@ packages:
     engines: {node: '>=10'}
     hasBin: true
     dependencies:
+      JSONStream: 1.3.5
       is-text-path: 1.0.1
-      JSONStream: 1.3.5
       lodash: 4.17.21
       meow: 8.1.2
       split2: 3.2.2

src/grammar/flinksql/FlinkSqlParser.g4

@@ -9,12 +9,14 @@ statement
     ;

 sqlStatements
-    : (sqlStatement SEMICOLON? | emptyStatement)*
+    : (sqlStatement | emptyStatement)*
     ;

 sqlStatement
-    : ddlStatement | dmlStatement | describeStatement | explainStatement | useStatement | showStatememt
-    | loadStatement | unloadStatememt | setStatememt | resetStatememt | jarStatememt | dtAddStatement
+    : ddlStatement SEMICOLON? | dmlStatement SEMICOLON? | describeStatement SEMICOLON?
+    | explainStatement SEMICOLON? | useStatement SEMICOLON? | showStatememt SEMICOLON?
+    | loadStatement SEMICOLON? | unloadStatememt SEMICOLON? | setStatememt SEMICOLON?
+    | resetStatememt SEMICOLON? | jarStatememt SEMICOLON? | dtAddStatement SEMICOLON?
     ;

 emptyStatement
@@ -33,7 +35,7 @@ dmlStatement
 // some statemen
 describeStatement
-    : (KW_DESCRIBE | KW_DESC) uid
+    : (KW_DESCRIBE | KW_DESC) tablePath
     ;

 explainStatement
@@ -49,7 +51,9 @@ explainDetail
     ;

 useStatement
-    : KW_USE KW_CATALOG? uid | useModuleStatement
+    : KW_USE KW_CATALOG catalogPath
+    | KW_USE databasePath
+    | useModuleStatement
     ;

 useModuleStatement
@@ -59,7 +63,7 @@ useModuleStatement
 showStatememt
     : KW_SHOW (KW_CATALOGS | KW_DATABASES | KW_VIEWS | KW_JARS)
     | KW_SHOW KW_CURRENT (KW_CATALOG | KW_DATABASE)
-    | KW_SHOW KW_TABLES (( KW_FROM | KW_IN ) uid)? likePredicate?
+    | KW_SHOW KW_TABLES (( KW_FROM | KW_IN ) tablePath)? likePredicate?
     | KW_SHOW KW_COLUMNS ( KW_FROM | KW_IN ) uid likePredicate?
     | KW_SHOW KW_CREATE (KW_TABLE | KW_VIEW) uid
     | KW_SHOW KW_USER? KW_FUNCTIONS
@@ -106,7 +110,7 @@ createTable
     ;

 simpleCreateTable
-    : KW_CREATE KW_TEMPORARY? KW_TABLE ifNotExists? sourceTable
+    : KW_CREATE KW_TEMPORARY? KW_TABLE ifNotExists? tablePathCreate
     LR_BRACKET
     columnOptionDefinition (COMMA columnOptionDefinition)*
     (COMMA watermarkDefinition)?
@@ -124,7 +128,7 @@ simpleCreateTable
  * CTAS does not support explicitly specified columns, partitioned tables, or temporary tables
  */
 createTableAsSelect
-    : KW_CREATE KW_TABLE ifNotExists? sourceTable withOption (KW_AS queryStatement)?
+    : KW_CREATE KW_TABLE ifNotExists? tablePathCreate withOption (KW_AS queryStatement)?
     ;

 columnOptionDefinition
@@ -244,11 +248,7 @@ transformArgument
     ;

 likeDefinition
-    : KW_LIKE sourceTable (LR_BRACKET likeOption* RR_BRACKET)?
-    ;
-
-sourceTable
-    : uid
+    : KW_LIKE tablePath (LR_BRACKET likeOption* RR_BRACKET)?
     ;

 likeOption
@@ -261,7 +261,7 @@ createCatalog
     ;

 createDatabase
-    : KW_CREATE KW_DATABASE ifNotExists? uid commentSpec? withOption
+    : KW_CREATE KW_DATABASE ifNotExists? databasePathCreate commentSpec? withOption
     ;

 createView
@@ -269,7 +269,7 @@ createView
     ;

 createFunction
-    : KW_CREATE (KW_TEMPORARY|KW_TEMPORARY KW_SYSTEM)? KW_FUNCTION ifNotExists? uid KW_AS identifier (KW_LANGUAGE (KW_JAVA|KW_SCALA|KW_PYTHON))? usingClause?
+    : KW_CREATE (KW_TEMPORARY|KW_TEMPORARY KW_SYSTEM)? KW_FUNCTION ifNotExists? functionName KW_AS identifier (KW_LANGUAGE (KW_JAVA|KW_SCALA|KW_PYTHON))? usingClause?
     ;

 usingClause
@@ -285,7 +285,7 @@ jarFileName
 // it only includes rename, set key, add constraint, drop constraint, add unique
 alterTable
-    : KW_ALTER KW_TABLE ifExists? uid (renameDefinition | setKeyValueDefinition | addConstraint | dropConstraint | addUnique)
+    : KW_ALTER KW_TABLE ifExists? tablePath (renameDefinition | setKeyValueDefinition | addConstraint | dropConstraint | addUnique)
     ;

 renameDefinition
@@ -317,7 +317,7 @@ alertView
     ;

 alterDatabase
-    : KW_ALTER KW_DATABASE uid setKeyValueDefinition
+    : KW_ALTER KW_DATABASE databasePath setKeyValueDefinition
     ;

 alterFunction
@@ -328,15 +328,15 @@ alterFunction
 // Drop statements
 dropCatalog
-    : KW_DROP KW_CATALOG ifExists? uid
+    : KW_DROP KW_CATALOG ifExists? catalogPath
     ;

 dropTable
-    : KW_DROP KW_TEMPORARY? KW_TABLE ifExists? uid
+    : KW_DROP KW_TEMPORARY? KW_TABLE ifExists? tablePath
     ;

 dropDatabase
-    : KW_DROP KW_DATABASE ifExists? uid dropType=(KW_RESTRICT | KW_CASCADE)?
+    : KW_DROP KW_DATABASE ifExists? databasePath dropType=(KW_RESTRICT | KW_CASCADE)?
     ;

 dropView
@@ -344,7 +344,7 @@ dropView
     ;

 dropFunction
-    : KW_DROP (KW_TEMPORARY|KW_TEMPORARY KW_SYSTEM)? KW_FUNCTION ifExists? uid
+    : KW_DROP (KW_TEMPORARY|KW_TEMPORARY KW_SYSTEM)? KW_FUNCTION ifExists? functionName
     ;

@@ -356,7 +356,7 @@ insertStatement
     ;

 insertSimpleStatement
-    : KW_INSERT (KW_INTO | KW_OVERWRITE) uid
+    : KW_INSERT (KW_INTO | KW_OVERWRITE) tablePath
     (
         insertPartitionDefinition? columnNameList? queryStatement
         | valuesDefinition
@@ -454,9 +454,7 @@ tablePrimary
     | KW_UNNEST LR_BRACKET expression RR_BRACKET
     ;

-tablePath
-    : uid
-    ;

 systemTimePeriod
     : KW_FOR KW_SYSTEM_TIME KW_AS KW_OF dateTimeExpression
@@ -827,6 +825,26 @@ whenClause
     : KW_WHEN condition=expression KW_THEN result=expression
     ;

+catalogPath
+    : uid
+    ;
+
+databasePath
+    : uid
+    ;
+
+databasePathCreate
+    : uid
+    ;
+
+tablePathCreate
+    : uid
+    ;
+
+tablePath
+    : uid
+    ;
+
 uid
     : identifier (DOT identifier)*?
     ;
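Splitting the old catch-all uid into dedicated catalogPath / databasePath / tablePath rules (plus *Create variants) is what lets antlr4-c3 report which kind of name fits at the caret: the rules are registered as preferred rules, and each candidate rule maps one-to-one to a completion type. A self-contained sketch under that assumption (import paths as in this repo's src/lib; exact token index handling may differ from the real implementation below):

import { CharStreams, CommonTokenStream } from 'antlr4ts';
import { CodeCompletionCore } from 'antlr4-c3';
import { FlinkSqlLexer } from '../lib/flinksql/FlinkSqlLexer';
import { FlinkSqlParser } from '../lib/flinksql/FlinkSqlParser';

const lexer = new FlinkSqlLexer(CharStreams.fromString('SELECT * FROM '.toUpperCase()));
const tokenStream = new CommonTokenStream(lexer);
tokenStream.fill();
const parser = new FlinkSqlParser(tokenStream);
parser.program();

const core = new CodeCompletionCore(parser);
core.preferredRules = new Set([FlinkSqlParser.RULE_tablePath, FlinkSqlParser.RULE_catalogPath]);
// collect candidates at the EOF token, i.e. right after FROM
const candidates = core.collectCandidates(tokenStream.size - 1);
// With a dedicated rule, this says "a table path goes here"; with the old
// bare `uid`, tables, databases and catalogs were indistinguishable.
console.log(candidates.rules.has(FlinkSqlParser.RULE_tablePath));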

src/index.ts

@@ -14,3 +14,6 @@ export * from './lib/pgsql/PostgreSQLParserListener';
 export * from './lib/pgsql/PostgreSQLParserVisitor';
 export * from './lib/trinosql/TrinoSqlListener';
 export * from './lib/trinosql/TrinoSqlVisitor';
+
+export type * from './parser/common/basic-parser-types';
+export type { SyntaxError, ParserError } from './parser/common/parserErrorListener';

src/lib/flinksql/FlinkSqlLexer.ts

@@ -1,4 +1,4 @@
-// Generated from /Users/ziv/github.com/dt-sql-parser/src/grammar/flinksql/FlinkSqlLexer.g4 by ANTLR 4.9.0-SNAPSHOT
+// Generated from /Users/hayden/Desktop/dt-works/dt-sql-parser/src/grammar/flinksql/FlinkSqlLexer.g4 by ANTLR 4.9.0-SNAPSHOT

 import { ATN } from "antlr4ts/atn/ATN";

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

src/lib/flinksql/FlinkSqlParserListener.ts

@@ -1,4 +1,4 @@
-// Generated from /Users/ziv/github.com/dt-sql-parser/src/grammar/flinksql/FlinkSqlParser.g4 by ANTLR 4.9.0-SNAPSHOT
+// Generated from /Users/hayden/Desktop/dt-works/dt-sql-parser/src/grammar/flinksql/FlinkSqlParser.g4 by ANTLR 4.9.0-SNAPSHOT

 import { ParseTreeListener } from "antlr4ts/tree/ParseTreeListener";
@@ -84,7 +84,6 @@ import { TransformListContext } from "./FlinkSqlParser";
 import { TransformContext } from "./FlinkSqlParser";
 import { TransformArgumentContext } from "./FlinkSqlParser";
 import { LikeDefinitionContext } from "./FlinkSqlParser";
-import { SourceTableContext } from "./FlinkSqlParser";
 import { LikeOptionContext } from "./FlinkSqlParser";
 import { CreateCatalogContext } from "./FlinkSqlParser";
 import { CreateDatabaseContext } from "./FlinkSqlParser";
@@ -127,7 +126,6 @@ import { FromClauseContext } from "./FlinkSqlParser";
 import { TableExpressionContext } from "./FlinkSqlParser";
 import { TableReferenceContext } from "./FlinkSqlParser";
 import { TablePrimaryContext } from "./FlinkSqlParser";
-import { TablePathContext } from "./FlinkSqlParser";
 import { SystemTimePeriodContext } from "./FlinkSqlParser";
 import { DateTimeExpressionContext } from "./FlinkSqlParser";
 import { InlineDataValueClauseContext } from "./FlinkSqlParser";
@@ -192,6 +190,11 @@ import { IdentifierContext } from "./FlinkSqlParser";
 import { UnquotedIdentifierContext } from "./FlinkSqlParser";
 import { QuotedIdentifierContext } from "./FlinkSqlParser";
 import { WhenClauseContext } from "./FlinkSqlParser";
+import { CatalogPathContext } from "./FlinkSqlParser";
+import { DatabasePathContext } from "./FlinkSqlParser";
+import { DatabasePathCreateContext } from "./FlinkSqlParser";
+import { TablePathCreateContext } from "./FlinkSqlParser";
+import { TablePathContext } from "./FlinkSqlParser";
 import { UidContext } from "./FlinkSqlParser";
 import { WithOptionContext } from "./FlinkSqlParser";
 import { IfNotExistsContext } from "./FlinkSqlParser";
@@ -1177,17 +1180,6 @@ export interface FlinkSqlParserListener extends ParseTreeListener {
      */
     exitLikeDefinition?: (ctx: LikeDefinitionContext) => void;

-    /**
-     * Enter a parse tree produced by `FlinkSqlParser.sourceTable`.
-     * @param ctx the parse tree
-     */
-    enterSourceTable?: (ctx: SourceTableContext) => void;
-    /**
-     * Exit a parse tree produced by `FlinkSqlParser.sourceTable`.
-     * @param ctx the parse tree
-     */
-    exitSourceTable?: (ctx: SourceTableContext) => void;
-
     /**
      * Enter a parse tree produced by `FlinkSqlParser.likeOption`.
      * @param ctx the parse tree
@@ -1650,17 +1642,6 @@ export interface FlinkSqlParserListener extends ParseTreeListener {
      */
     exitTablePrimary?: (ctx: TablePrimaryContext) => void;

-    /**
-     * Enter a parse tree produced by `FlinkSqlParser.tablePath`.
-     * @param ctx the parse tree
-     */
-    enterTablePath?: (ctx: TablePathContext) => void;
-    /**
-     * Exit a parse tree produced by `FlinkSqlParser.tablePath`.
-     * @param ctx the parse tree
-     */
-    exitTablePath?: (ctx: TablePathContext) => void;
-
     /**
      * Enter a parse tree produced by `FlinkSqlParser.systemTimePeriod`.
      * @param ctx the parse tree
@@ -2365,6 +2346,61 @@ export interface FlinkSqlParserListener extends ParseTreeListener {
      */
     exitWhenClause?: (ctx: WhenClauseContext) => void;

+    /**
+     * Enter a parse tree produced by `FlinkSqlParser.catalogPath`.
+     * @param ctx the parse tree
+     */
+    enterCatalogPath?: (ctx: CatalogPathContext) => void;
+    /**
+     * Exit a parse tree produced by `FlinkSqlParser.catalogPath`.
+     * @param ctx the parse tree
+     */
+    exitCatalogPath?: (ctx: CatalogPathContext) => void;
+
+    /**
+     * Enter a parse tree produced by `FlinkSqlParser.databasePath`.
+     * @param ctx the parse tree
+     */
+    enterDatabasePath?: (ctx: DatabasePathContext) => void;
+    /**
+     * Exit a parse tree produced by `FlinkSqlParser.databasePath`.
+     * @param ctx the parse tree
+     */
+    exitDatabasePath?: (ctx: DatabasePathContext) => void;
+
+    /**
+     * Enter a parse tree produced by `FlinkSqlParser.databasePathCreate`.
+     * @param ctx the parse tree
+     */
+    enterDatabasePathCreate?: (ctx: DatabasePathCreateContext) => void;
+    /**
+     * Exit a parse tree produced by `FlinkSqlParser.databasePathCreate`.
+     * @param ctx the parse tree
+     */
+    exitDatabasePathCreate?: (ctx: DatabasePathCreateContext) => void;
+
+    /**
+     * Enter a parse tree produced by `FlinkSqlParser.tablePathCreate`.
+     * @param ctx the parse tree
+     */
+    enterTablePathCreate?: (ctx: TablePathCreateContext) => void;
+    /**
+     * Exit a parse tree produced by `FlinkSqlParser.tablePathCreate`.
+     * @param ctx the parse tree
+     */
+    exitTablePathCreate?: (ctx: TablePathCreateContext) => void;
+
+    /**
+     * Enter a parse tree produced by `FlinkSqlParser.tablePath`.
+     * @param ctx the parse tree
+     */
+    enterTablePath?: (ctx: TablePathContext) => void;
+    /**
+     * Exit a parse tree produced by `FlinkSqlParser.tablePath`.
+     * @param ctx the parse tree
+     */
+    exitTablePath?: (ctx: TablePathContext) => void;
+
     /**
      * Enter a parse tree produced by `FlinkSqlParser.uid`.
      * @param ctx the parse tree

src/lib/flinksql/FlinkSqlParserVisitor.ts

@@ -1,4 +1,4 @@
-// Generated from /Users/ziv/github.com/dt-sql-parser/src/grammar/flinksql/FlinkSqlParser.g4 by ANTLR 4.9.0-SNAPSHOT
+// Generated from /Users/hayden/Desktop/dt-works/dt-sql-parser/src/grammar/flinksql/FlinkSqlParser.g4 by ANTLR 4.9.0-SNAPSHOT

 import { ParseTreeVisitor } from "antlr4ts/tree/ParseTreeVisitor";
@@ -84,7 +84,6 @@ import { TransformListContext } from "./FlinkSqlParser";
 import { TransformContext } from "./FlinkSqlParser";
 import { TransformArgumentContext } from "./FlinkSqlParser";
 import { LikeDefinitionContext } from "./FlinkSqlParser";
-import { SourceTableContext } from "./FlinkSqlParser";
 import { LikeOptionContext } from "./FlinkSqlParser";
 import { CreateCatalogContext } from "./FlinkSqlParser";
 import { CreateDatabaseContext } from "./FlinkSqlParser";
@@ -127,7 +126,6 @@ import { FromClauseContext } from "./FlinkSqlParser";
 import { TableExpressionContext } from "./FlinkSqlParser";
 import { TableReferenceContext } from "./FlinkSqlParser";
 import { TablePrimaryContext } from "./FlinkSqlParser";
-import { TablePathContext } from "./FlinkSqlParser";
 import { SystemTimePeriodContext } from "./FlinkSqlParser";
 import { DateTimeExpressionContext } from "./FlinkSqlParser";
 import { InlineDataValueClauseContext } from "./FlinkSqlParser";
@@ -192,6 +190,11 @@ import { IdentifierContext } from "./FlinkSqlParser";
 import { UnquotedIdentifierContext } from "./FlinkSqlParser";
 import { QuotedIdentifierContext } from "./FlinkSqlParser";
 import { WhenClauseContext } from "./FlinkSqlParser";
+import { CatalogPathContext } from "./FlinkSqlParser";
+import { DatabasePathContext } from "./FlinkSqlParser";
+import { DatabasePathCreateContext } from "./FlinkSqlParser";
+import { TablePathCreateContext } from "./FlinkSqlParser";
+import { TablePathContext } from "./FlinkSqlParser";
 import { UidContext } from "./FlinkSqlParser";
 import { WithOptionContext } from "./FlinkSqlParser";
 import { IfNotExistsContext } from "./FlinkSqlParser";
@@ -825,13 +828,6 @@ export interface FlinkSqlParserVisitor<Result> extends ParseTreeVisitor<Result>
      */
     visitLikeDefinition?: (ctx: LikeDefinitionContext) => Result;

-    /**
-     * Visit a parse tree produced by `FlinkSqlParser.sourceTable`.
-     * @param ctx the parse tree
-     * @return the visitor result
-     */
-    visitSourceTable?: (ctx: SourceTableContext) => Result;
-
     /**
      * Visit a parse tree produced by `FlinkSqlParser.likeOption`.
      * @param ctx the parse tree
@@ -1126,13 +1122,6 @@ export interface FlinkSqlParserVisitor<Result> extends ParseTreeVisitor<Result>
      */
     visitTablePrimary?: (ctx: TablePrimaryContext) => Result;

-    /**
-     * Visit a parse tree produced by `FlinkSqlParser.tablePath`.
-     * @param ctx the parse tree
-     * @return the visitor result
-     */
-    visitTablePath?: (ctx: TablePathContext) => Result;
-
     /**
      * Visit a parse tree produced by `FlinkSqlParser.systemTimePeriod`.
      * @param ctx the parse tree
@@ -1581,6 +1570,41 @@ export interface FlinkSqlParserVisitor<Result> extends ParseTreeVisitor<Result>
      */
     visitWhenClause?: (ctx: WhenClauseContext) => Result;

+    /**
+     * Visit a parse tree produced by `FlinkSqlParser.catalogPath`.
+     * @param ctx the parse tree
+     * @return the visitor result
+     */
+    visitCatalogPath?: (ctx: CatalogPathContext) => Result;
+
+    /**
+     * Visit a parse tree produced by `FlinkSqlParser.databasePath`.
+     * @param ctx the parse tree
+     * @return the visitor result
+     */
+    visitDatabasePath?: (ctx: DatabasePathContext) => Result;
+
+    /**
+     * Visit a parse tree produced by `FlinkSqlParser.databasePathCreate`.
+     * @param ctx the parse tree
+     * @return the visitor result
+     */
+    visitDatabasePathCreate?: (ctx: DatabasePathCreateContext) => Result;
+
+    /**
+     * Visit a parse tree produced by `FlinkSqlParser.tablePathCreate`.
+     * @param ctx the parse tree
+     * @return the visitor result
+     */
+    visitTablePathCreate?: (ctx: TablePathCreateContext) => Result;
+
+    /**
+     * Visit a parse tree produced by `FlinkSqlParser.tablePath`.
+     * @param ctx the parse tree
+     * @return the visitor result
+     */
+    visitTablePath?: (ctx: TablePathContext) => Result;
+
     /**
      * Visit a parse tree produced by `FlinkSqlParser.uid`.
      * @param ctx the parse tree

src/parser/common/basic-parser-types.ts

@@ -0,0 +1,71 @@
/**
 * The insertion position of the candidate list.
 * Such as cursor position
 */
export interface CaretPosition {
    /** start at 1 */
    lineNumber: number;
    /** start at 1 */
    column: number;
}

/**
 * Syntax context type at caret position
 */
export enum SyntaxContextType {
    /** catalog name */
    CATALOG = 'catalog',
    /** database name path, such as catalog.db */
    DATABASE = 'database',
    /** database name path that will be created */
    DATABASE_CREATE = 'databaseCreate',
    /** table name path, such as catalog.db.tb */
    TABLE = 'table',
    /** table name path that will be created */
    TABLE_CREATE = 'tableCreate'
}

export interface WordRange {
    /** content of the word */
    text: string;
    /** start at 0 */
    startIndex: number;
    stopIndex: number;
    /** start at 1 */
    line: number;
    /** start at 1 */
    startColumn: number;
    stopColumn: number;
}

/**
 * Suggested syntax information analyzed from the input
 */
export interface SyntaxSuggestion<T = WordRange> {
    syntaxContextType: SyntaxContextType;
    wordRanges: T[];
}

/**
 * All suggested information analyzed from the input
 */
export interface Suggestions<T = WordRange> {
    /**
     * Suggestions about syntax
     */
    syntax: SyntaxSuggestion<T>[];
    /**
     * Suggestions about keywords
     */
    keywords: string[];
}

export interface TextSlice {
    startIndex: number;
    endIndex: number;
    startLine: number;
    endLine: number;
    startColumn: number;
    endColumn: number;
    text: string;
}
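For reference, a value matching these types, roughly what the syntax-suggestion tests below expect for `INSERT INTO cat.db.tb` with the caret at the end. This is a hand-written illustration, not actual library output:

import { Suggestions, SyntaxContextType } from './basic-parser-types';

const example: Suggestions = {
    syntax: [
        {
            syntaxContextType: SyntaxContextType.TABLE,
            wordRanges: [
                { text: 'cat', startIndex: 12, stopIndex: 14, line: 1, startColumn: 13, stopColumn: 15 },
                { text: '.',   startIndex: 15, stopIndex: 15, line: 1, startColumn: 16, stopColumn: 16 },
                { text: 'db',  startIndex: 16, stopIndex: 17, line: 1, startColumn: 17, stopColumn: 18 },
            ],
        },
    ],
    keywords: [], // keyword candidates at the caret would appear here
};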

src/parser/common/basicParser.ts

@@ -1,86 +1,164 @@
-import { Parser } from 'antlr4ts';
-import { ParseTreeWalker } from 'antlr4ts/tree';
+import {
+    Parser,
+    Lexer,
+    Token,
+    CharStreams,
+    CommonTokenStream,
+    CodePointCharStream,
+    ParserRuleContext
+} from 'antlr4ts';
+import { ParseTreeWalker, ParseTreeListener } from 'antlr4ts/tree';
+import { CandidatesCollection, CodeCompletionCore } from 'antlr4-c3';
+import { findCaretTokenIndex } from '../../utils/findCaretTokenIndex';
+import {
+    CaretPosition,
+    Suggestions,
+    SyntaxSuggestion,
+    WordRange,
+    TextSlice
+} from './basic-parser-types';
 import ParserErrorListener, {
     ParserError,
     ErrorHandler,
     ParserErrorCollector,
 } from './parserErrorListener';

-interface IParser extends Parser {
-    // Lost in type definition
-    ruleNames: string[];
+interface IParser<IParserRuleContext extends ParserRuleContext> extends Parser {
     // Customized in our parser
-    program(): any;
+    program(): IParserRuleContext;
+}
+
+interface SplitListener extends ParseTreeListener {
+    statementsContext: ParserRuleContext[];
 }

 /**
  * Custom Parser class, subclass needs extends it.
  */
-export default abstract class BasicParser {
-    private _parser: IParser;
-
-    public parse(
-        input: string,
-        errorListener?: ErrorHandler<any>,
-    ) {
-        const parser = this.createParser(input);
-        this._parser = parser;
-
-        parser.removeErrorListeners();
-        parser.addErrorListener(new ParserErrorListener(errorListener));
-
-        const parserTree = parser.program();
-
-        return parserTree;
-    }
-
-    public validate(input: string): ParserError[] {
-        const lexerError = []; const syntaxErrors = [];
-
-        const parser = this.createParser(input);
-        this._parser = parser;
-
-        parser.removeErrorListeners();
-        parser.addErrorListener(new ParserErrorCollector(syntaxErrors));
-
-        parser.program();
-
-        return lexerError.concat(syntaxErrors);
-    }
+export default abstract class BasicParser<
+    L extends Lexer = Lexer,
+    PRC extends ParserRuleContext = ParserRuleContext,
+    P extends IParser<PRC> = IParser<PRC>
+> {
+    protected _charStreams: CodePointCharStream;
+    protected _lexer: L;
+    protected _tokenStream: CommonTokenStream;
+    protected _parser: P;
+    protected _parserTree: PRC;
+    protected _errorCollector: ParserErrorCollector = new ParserErrorCollector();
+    protected _parsedInput: string = null;

     /**
-     * Create antrl4 Lexer object
+     * preferredRules for antlr4-c3
+     */
+    public abstract preferredRules: Set<number>;
+
+    /**
+     * Create antlr4 Lexer instance
      * @param input source string
      */
-    public abstract createLexer(input: string);
+    public abstract createLexerFormCharStream(charStreams: CodePointCharStream): L;

     /**
-     * Create Parser by lexer
-     * @param lexer Lexer
+     * Create Parser by CommonTokenStream
+     * @param tokenStream CommonTokenStream
      */
-    public abstract createParserFromLexer(lexer);
+    public abstract createParserFromTokenStream(tokenStream: CommonTokenStream): P;

     /**
+     * convert candidates to suggestions
+     * @param candidates candidate list
+     * @param allTokens all tokens from input
+     * @param caretTokenIndex tokenIndex of caretPosition
+     * @param tokenIndexOffset offset of the tokenIndex in the candidates
+     * compared to the tokenIndex in allTokens
+     */
+    public abstract processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number,
+        tokenIndexOffset: number,
+    ): Suggestions<Token>;
+
+    /**
+     * splitListener instance Getter
+     */
+    protected abstract get splitListener(): SplitListener;
+
+    /**
+     * If invoked repeatedly with the same input, the parsing result of the
+     * first call is returned directly, unless an errorListener is passed.
+     * @param input source string
+     * @param errorListener listen errors
+     * @returns parserTree
+     */
+    public parse(
+        input: string,
+        errorListener?: ErrorHandler<any>
+    ) {
+        // Avoid parsing the same input repeatedly
+        if (this._parsedInput === input && !errorListener) {
+            return this._parserTree;
+        }
+
+        const parser = this.createParser(input);
+        this._parsedInput = input;
+
+        parser.removeErrorListeners();
+        this._errorCollector.clear();
+        parser.addErrorListener(this._errorCollector);
+        if (errorListener) {
+            parser.addErrorListener(new ParserErrorListener(errorListener));
+        }
+
+        this._parserTree = parser.program();
+
+        return this._parserTree;
+    }
+
+    /**
+     * validate input string and return syntax errors
+     * @param input source string
+     * @returns syntax errors
+     */
+    public validate(input: string): ParserError[] {
+        this.parse(input);
+        const lexerError = [];
+        return lexerError.concat(this._errorCollector.parserErrors);
+    }
+
+    /**
+     * Get all Tokens of the input string; '<EOF>' is not included
+     * @param input source string
      * @returns Token[]
      */
-    public getAllTokens(input: string): string[] {
-        const lexer = this.createLexer(input);
-        return lexer.getAllTokens().map(token => token.text);
+    public getAllTokens(input: string): Token[] {
+        this.parse(input);
+        let allTokens = this._tokenStream.getTokens();
+        if (allTokens[allTokens.length - 1].text === '<EOF>') {
+            allTokens = allTokens.slice(0, -1);
+        }
+        return allTokens;
     };

     /**
      * Get Parser instance by input string
-     * @param input
+     * @param input string
      */
-    public createParser(input: string): IParser {
-        const lexer = this.createLexer(input);
-        const parser: any = this.createParserFromLexer(lexer);
-        parser.buildParseTrees = true;
-        this._parser = parser;
-        return parser;
+    public createParser(input: string): P {
+        this._parserTree = null;
+        this._charStreams = CharStreams.fromString(input.toUpperCase());
+        this._lexer = this.createLexerFormCharStream(this._charStreams);
+
+        this._tokenStream = new CommonTokenStream(this._lexer);
+        this._tokenStream.fill();
+
+        this._parser = this.createParserFromTokenStream(this._tokenStream);
+        this._parser.buildParseTree = true;
+
+        return this._parser;
     }

     /**
@@ -88,18 +166,15 @@ export default abstract class BasicParser {
      * @param string input
      */
     public parserTreeToString(input: string): string {
-        const parser = this.createParser(input);
-        this._parser = parser;
-
-        const tree = parser.program();
-        return tree.toStringTree(parser.ruleNames);
+        this.parse(input);
+        return this._parserTree.toStringTree(this._parser.ruleNames);
     }

     /**
      * Get List-like style tree string
-     * @param parserTree
+     * @param parserTree ProgramRuleContext
      */
-    public toString(parserTree: any): string {
+    public toString(parserTree: PRC): string {
         return parserTree.toStringTree(this._parser.ruleNames);
     }

@@ -107,7 +182,130 @@ export default abstract class BasicParser {
      * @param listener Listener instance extends ParserListener
      * @param parserTree parser Tree
      */
-    public listen(listener: any, parserTree: any) {
+    public listen<PTL extends ParseTreeListener = ParseTreeListener>(listener: PTL, parserTree: PRC) {
         ParseTreeWalker.DEFAULT.walk(listener, parserTree);
     }
+
+    /**
+     * split input into statements
+     * @param input source string
+     */
+    public splitSQL(input: string): TextSlice[] {
+        this.parse(input);
+        const splitListener = this.splitListener;
+        this.listen(splitListener, this._parserTree);
+
+        const res = splitListener.statementsContext.map(context => {
+            const { start, stop } = context;
+            return {
+                startIndex: start.startIndex,
+                endIndex: stop.stopIndex,
+                startLine: start.line,
+                endLine: stop.line,
+                startColumn: start.charPositionInLine + 1,
+                endColumn: stop.charPositionInLine + stop.text.length,
+                text: this._parsedInput.slice(start.startIndex, stop.stopIndex + 1),
+            };
+        });
+
+        return res;
+    }
+
+    /**
+     * Get suggestions of syntax and token at caretPosition
+     * @param input source string
+     * @param caretPosition caret position, such as cursor position
+     * @returns suggestion
+     */
+    public getSuggestionAtCaretPosition(input: string, caretPosition: CaretPosition): Suggestions | null {
+        const splitListener = this.splitListener;
+        // TODO: add a splitListener to every SQL parser implementation and remove this guard
+        if (!splitListener) return null;
+
+        this.parse(input);
+        let sqlParserIns = this._parser;
+        let allTokens = this.getAllTokens(input);
+        let caretTokenIndex = findCaretTokenIndex(caretPosition, allTokens);
+        let c3Context: ParserRuleContext = this._parserTree;
+        let tokenIndexOffset: number = 0;
+
+        if (!caretTokenIndex && caretTokenIndex !== 0) return null;
+
+        /**
+         * Split the input by statement.
+         * Try to collect candidates from the caret statement only.
+         */
+        this.listen(splitListener, this._parserTree);
+
+        // If there are multiple statements.
+        if (splitListener.statementsContext.length) {
+            // Find the statement rule context where caretPosition is located.
+            const caretStatementContext = splitListener?.statementsContext.find(ctx => {
+                return caretTokenIndex <= ctx.stop?.tokenIndex && caretTokenIndex >= ctx.start.tokenIndex;
+            });
+
+            if (caretStatementContext) {
+                c3Context = caretStatementContext;
+            } else {
+                const lastIndex = splitListener.statementsContext.length > 1
+                    ? 2
+                    : 1;
+                const lastStatementToken = splitListener
+                    .statementsContext[splitListener?.statementsContext.length - lastIndex]
+                    .stop;
+                /**
+                 * If caretStatementContext is not found and the caret follows all statements,
+                 * reparse the part of the input following the penultimate statement,
+                 * and let c3 collect candidates in the new parserTreeContext.
+                 */
+                if (caretTokenIndex > lastStatementToken?.tokenIndex) {
+                    /**
+                     * Save the offset of the tokenIndex in the partial input
+                     * compared to the tokenIndex in the whole input
+                     */
+                    tokenIndexOffset = lastStatementToken?.tokenIndex + 1;
+                    // Correct caretTokenIndex
+                    caretTokenIndex = caretTokenIndex - tokenIndexOffset;
+
+                    const inputSlice = input.slice(lastStatementToken.stopIndex + 1);
+                    const charStreams = CharStreams.fromString(inputSlice.toUpperCase());
+                    const lexer = this.createLexerFormCharStream(charStreams);
+                    const tokenStream = new CommonTokenStream(lexer);
+                    tokenStream.fill();
+                    const parser = this.createParserFromTokenStream(tokenStream);
+                    parser.buildParseTree = true;
+
+                    sqlParserIns = parser;
+                    c3Context = parser.program();
+                }
+            }
+        }
+
+        const core = new CodeCompletionCore(sqlParserIns);
+        core.preferredRules = this.preferredRules;
+
+        const candidates = core.collectCandidates(caretTokenIndex, c3Context);
+        const originalSuggestions = this.processCandidates(candidates, allTokens, caretTokenIndex, tokenIndexOffset);
+
+        const syntaxSuggestions: SyntaxSuggestion<WordRange>[] = originalSuggestions.syntax
+            .map(syntaxCtx => {
+                const wordRanges: WordRange[] = syntaxCtx.wordRanges.map(token => {
+                    return {
+                        text: this._parsedInput.slice(token.startIndex, token.stopIndex + 1),
+                        startIndex: token.startIndex,
+                        stopIndex: token.stopIndex,
+                        line: token.line,
+                        startColumn: token.charPositionInLine + 1,
+                        stopColumn: token.charPositionInLine + token.text.length,
+                    };
+                });
+                return {
+                    syntaxContextType: syntaxCtx.syntaxContextType,
+                    wordRanges,
+                };
+            });
+
+        return {
+            syntax: syntaxSuggestions,
+            keywords: originalSuggestions.keywords,
+        };
+    }
 }
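The two new public entry points in action, a sketch using the FlinkSQL subclass defined later in this commit (import path assumed relative to src/parser):

import FlinkSQL from './flinksql'; // assumed sibling import within src/parser

const parser = new FlinkSQL();

// splitSQL: statement boundaries, with both index- and line/column-based ranges
const slices = parser.splitSQL('SHOW TABLES;\nSELECT * FROM tb;');
// -> roughly [{ text: 'SHOW TABLES;', startLine: 1, ... }, { text: 'SELECT * FROM tb;', startLine: 2, ... }]

// getSuggestionAtCaretPosition: only the statement under the caret is analyzed;
// for a caret behind the last statement, the tail of the input is re-parsed and
// caretTokenIndex is shifted back by tokenIndexOffset, as implemented above.
const suggestions = parser.getSuggestionAtCaretPosition('SELECT * FROM tb;\nUSE ', {
    lineNumber: 2,
    column: 5,
});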

src/parser/common/parserErrorListener.ts

@@ -1,4 +1,6 @@
 import { Token, Recognizer, ParserErrorListener, RecognitionException } from 'antlr4ts';
+import { ATNSimulator } from 'antlr4ts/atn/ATNSimulator';

 export interface ParserError {
     startLine: number;
     endLine: number;
@@ -8,56 +10,70 @@ export interface ParserError {
 }

 export interface SyntaxError<T> {
-    recognizer: Recognizer<T, any>;
+    recognizer: Recognizer<T, ATNSimulator>;
     offendingSymbol: Token;
     line: number;
     charPositionInLine: number;
     msg: string;
-    e: any;
+    e: RecognitionException;
 }

-type ErrorOffendingSymbol = {
-    text: string;
-};
-
 export type ErrorHandler<T> = (err: ParserError, errOption: SyntaxError<T>) => void;

 export class ParserErrorCollector implements ParserErrorListener {
-    private _errors: ParserError[];
-
-    constructor(error: ParserError[]) {
-        this._errors = error;
-    }
+    private _parseErrors: ParserError[] = [];
+    private _syntaxErrors: SyntaxError<Token>[] = [];

     syntaxError(
-        recognizer: Recognizer<ErrorOffendingSymbol, any>, offendingSymbol: ErrorOffendingSymbol, line: number,
-        charPositionInLine: number, msg: string, e: RecognitionException,
+        recognizer: Recognizer<Token, ATNSimulator>,
+        offendingSymbol: Token,
+        line: number,
+        charPositionInLine: number,
+        msg: string,
+        e: RecognitionException,
     ) {
         let endCol = charPositionInLine + 1;
         if (offendingSymbol && offendingSymbol.text !== null) {
             endCol = charPositionInLine + offendingSymbol.text.length;
         }
-        this._errors.push({
+        this._parseErrors.push({
             startLine: line,
             endLine: line,
             startCol: charPositionInLine,
             endCol: endCol,
             message: msg,
         });
+
+        this._syntaxErrors.push({
+            e,
+            line,
+            msg,
+            recognizer,
+            offendingSymbol,
+            charPositionInLine,
+        });
+    }
+
+    clear() {
+        this._parseErrors = [];
+        this._syntaxErrors = [];
+    }
+
+    get parserErrors() {
+        return this._parseErrors;
     }
 }

 export default class CustomParserErrorListener implements ParserErrorListener {
     private _errorHandler;

-    constructor(errorListener: ErrorHandler<ErrorOffendingSymbol>) {
+    constructor(errorListener: ErrorHandler<Token>) {
         this._errorHandler = errorListener;
     }

     syntaxError(
-        recognizer: Recognizer<ErrorOffendingSymbol, any>, offendingSymbol: ErrorOffendingSymbol, line: number,
-        charPositionInLine: number, msg: string, e: any,
+        recognizer: Recognizer<Token, ATNSimulator>, offendingSymbol: Token, line: number,
+        charPositionInLine: number, msg: string, e: RecognitionException,
     ) {
         let endCol = charPositionInLine + 1;
         if (offendingSymbol && offendingSymbol.text !== null) {
@@ -81,4 +97,3 @@ export default class CustomParserErrorListener implements ParserErrorListener {
         }
     }
 }
-
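With the collector now held by the parser (and cleared on each parse), validate simply reads back the accumulated ParserError entries. A quick sketch of the returned shape (values illustrative; import path assumed relative to src/parser/common):

import FlinkSQL from '../flinksql'; // assumed relative import

const errors = new FlinkSQL().validate('SELECT FROM tb');
// Each entry follows the ParserError interface, e.g.:
// { startLine: 1, endLine: 1, startCol: 7, endCol: 11, message: '...' }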

src/parser/flinksql.ts

@@ -1,17 +1,115 @@
-import { CharStreams, CommonTokenStream } from 'antlr4ts';
+import { Token } from 'antlr4ts';
+import { CandidatesCollection } from 'antlr4-c3';
 import { FlinkSqlLexer } from '../lib/flinksql/FlinkSqlLexer';
-import { FlinkSqlParser } from '../lib/flinksql/FlinkSqlParser';
+import {
+    FlinkSqlParser,
+    ProgramContext,
+    SqlStatementContext,
+    SqlStatementsContext
+} from '../lib/flinksql/FlinkSqlParser';
+import { FlinkSqlParserListener } from 'src/lib/flinksql/FlinkSqlParserListener';
+import { SyntaxContextType, Suggestions, SyntaxSuggestion } from './common/basic-parser-types';
 import BasicParser from './common/basicParser';

-export default class FlinkSQL extends BasicParser {
-    public createLexer(input: string): FlinkSqlLexer {
-        const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
-        const lexer = new FlinkSqlLexer(chars);
+export default class FlinkSQL extends BasicParser<FlinkSqlLexer, ProgramContext, FlinkSqlParser> {
+    public createLexerFormCharStream(charStreams) {
+        const lexer = new FlinkSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromLexer(lexer: FlinkSqlLexer): FlinkSqlParser {
-        const tokens = new CommonTokenStream(lexer);
-        const parser = new FlinkSqlParser(tokens);
+
+    public createParserFromTokenStream(tokenStream) {
+        const parser = new FlinkSqlParser(tokenStream);
         return parser;
     }
+
+    public preferredRules = new Set([
+        FlinkSqlParser.RULE_tablePath, // table name >> select / insert ...
+        FlinkSqlParser.RULE_tablePathCreate, // table name >> create
+        FlinkSqlParser.RULE_databasePath, // database name >> show
+        FlinkSqlParser.RULE_databasePathCreate, // database name >> create
+        FlinkSqlParser.RULE_catalogPath, // catalog name
+    ]);
+
+    protected get splitListener() {
+        return new FlinkSqlSplitListener();
+    }
+
+    public processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number,
+        tokenIndexOffset: number
+    ): Suggestions<Token> {
+        const originalSyntaxSuggestions: SyntaxSuggestion<Token>[] = [];
+        const keywords: string[] = [];
+
+        for (let candidate of candidates.rules) {
+            const [ruleType, candidateRule] = candidate;
+            const startTokenIndex = candidateRule.startTokenIndex + tokenIndexOffset;
+            const tokenRanges = allTokens.slice(startTokenIndex, caretTokenIndex + 1);
+
+            let syntaxContextType: SyntaxContextType;
+            switch (ruleType) {
+                case FlinkSqlParser.RULE_tablePath: {
+                    syntaxContextType = SyntaxContextType.TABLE;
+                    break;
+                }
+                case FlinkSqlParser.RULE_tablePathCreate: {
+                    syntaxContextType = SyntaxContextType.TABLE_CREATE;
+                    break;
+                }
+                case FlinkSqlParser.RULE_databasePath: {
+                    syntaxContextType = SyntaxContextType.DATABASE;
+                    break;
+                }
+                case FlinkSqlParser.RULE_databasePathCreate: {
+                    syntaxContextType = SyntaxContextType.DATABASE_CREATE;
+                    break;
+                }
+                case FlinkSqlParser.RULE_catalogPath: {
+                    syntaxContextType = SyntaxContextType.CATALOG;
+                    break;
+                }
+                default:
+                    break;
+            }
+
+            if (syntaxContextType) {
+                originalSyntaxSuggestions.push({
+                    syntaxContextType,
+                    wordRanges: tokenRanges,
+                });
+            }
+        }
+
+        for (let candidate of candidates.tokens) {
+            const symbolicName = this._parser.vocabulary.getSymbolicName(candidate[0]);
+            const displayName = this._parser.vocabulary.getDisplayName(candidate[0]);
+            if (symbolicName && symbolicName.startsWith('KW_')) {
+                const keyword = displayName.startsWith("'") && displayName.endsWith("'")
+                    ? displayName.slice(1, -1)
+                    : displayName;
+                keywords.push(keyword);
+            }
+        }
+
+        return {
+            syntax: originalSyntaxSuggestions,
+            keywords,
+        };
+    }
 }
+
+export class FlinkSqlSplitListener implements FlinkSqlParserListener {
+    private _statementsContext: SqlStatementContext[] = [];
+
+    exitSqlStatement = (ctx: SqlStatementContext) => {
+        this._statementsContext.push(ctx);
+    };
+
+    enterSqlStatements = (ctx: SqlStatementsContext) => {
+    };
+
+    get statementsContext() {
+        return this._statementsContext;
+    }
+}
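How the token candidates become keyword strings: FlinkSQL keeps only KW_-prefixed token types and strips the quotes from their display names. A sketch of that mapping in isolation (a hypothetical standalone function mirroring the candidates.tokens loop above; the token type number would come from antlr4-c3):

import { Vocabulary } from 'antlr4ts';

function keywordFromTokenType(vocabulary: Vocabulary, tokenType: number): string | null {
    const symbolicName = vocabulary.getSymbolicName(tokenType); // e.g. "KW_SELECT"
    if (!symbolicName || !symbolicName.startsWith('KW_')) return null;
    const displayName = vocabulary.getDisplayName(tokenType);   // e.g. "'SELECT'"
    return displayName.startsWith("'") && displayName.endsWith("'")
        ? displayName.slice(1, -1)                              // -> "SELECT"
        : displayName;
}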

src/parser/generic.ts

@@ -1,17 +1,35 @@
-import { CharStreams, CommonTokenStream } from 'antlr4ts';
+import { Token } from 'antlr4ts';
+import { CandidatesCollection } from 'antlr4-c3';
 import { SqlLexer } from '../lib/generic/SqlLexer';
-import { SqlParser } from '../lib/generic/SqlParser';
+import { SqlParser, ProgramContext } from '../lib/generic/SqlParser';
 import BasicParser from './common/basicParser';
+import { Suggestions } from './common/basic-parser-types';

-export default class GenericSQL extends BasicParser {
-    public createLexer(input: string): SqlLexer {
-        const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
-        const lexer = new SqlLexer(chars);
+export default class GenericSQL extends BasicParser<SqlLexer, ProgramContext, SqlParser> {
+    public createLexerFormCharStream(charStreams): SqlLexer {
+        const lexer = new SqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromLexer(lexer: SqlLexer): SqlParser {
-        const tokenStream = new CommonTokenStream(lexer);
+
+    public createParserFromTokenStream(tokenStream): SqlParser {
         return new SqlParser(tokenStream);
     }
+
+    public preferredRules: Set<number> = new Set();
+
+    protected get splitListener() {
+        return null as any;
+    }
+
+    public processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number
+    ): Suggestions<Token> {
+        return {
+            syntax: [],
+            keywords: [],
+        };
+    }
 }

src/parser/hive.ts

@@ -1,17 +1,36 @@
-import { CharStreams, CommonTokenStream } from 'antlr4ts';
+import { Token } from 'antlr4ts';
+import { CandidatesCollection } from 'antlr4-c3';
 import { HiveSqlLexer } from '../lib/hive/HiveSqlLexer';
-import { HiveSql } from '../lib/hive/HiveSql';
+import { HiveSql, ProgramContext } from '../lib/hive/HiveSql';
 import BasicParser from './common/basicParser';
+import { Suggestions } from './common/basic-parser-types';

-export default class HiveSQL extends BasicParser {
-    public createLexer(input: string): HiveSqlLexer {
-        const chars = CharStreams.fromString(input.toUpperCase());
-        const lexer = new HiveSqlLexer(chars);
+export default class HiveSQL extends BasicParser<HiveSqlLexer, ProgramContext, HiveSql> {
+    public createLexerFormCharStream(charStreams) {
+        const lexer = new HiveSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromLexer(lexer: HiveSqlLexer): HiveSql {
-        const tokenStream = new CommonTokenStream(lexer);
+
+    public createParserFromTokenStream(tokenStream) {
         return new HiveSql(tokenStream);
     }
+
+    protected get splitListener() {
+        return null as any;
+    }
+
+    public preferredRules: Set<number> = new Set();
+
+    public processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number
+    ): Suggestions<Token> {
+        return {
+            syntax: [],
+            keywords: [],
+        };
+    }
 }

src/parser/pgsql.ts

@@ -1,17 +1,34 @@
-import { CharStreams, CommonTokenStream, Lexer } from 'antlr4ts';
-import BasicParser from './common/basicParser';
+import { Token } from 'antlr4ts';
+import { CandidatesCollection } from 'antlr4-c3';
 import { PostgreSQLLexer } from '../lib/pgsql/PostgreSQLLexer';
-import { PostgreSQLParser } from '../lib/pgsql/PostgreSQLParser';
+import { PostgreSQLParser, ProgramContext } from '../lib/pgsql/PostgreSQLParser';
+import BasicParser from './common/basicParser';
+import { Suggestions } from './common/basic-parser-types';

-export default class PostgresSQL extends BasicParser {
-    public createLexer(input: string): PostgreSQLLexer {
-        const chars = CharStreams.fromString(input.toUpperCase());
-        const lexer = new PostgreSQLLexer(chars);
+export default class PostgresSQL extends BasicParser<PostgreSQLLexer, ProgramContext, PostgreSQLParser> {
+    public createLexerFormCharStream(charStreams) {
+        const lexer = new PostgreSQLLexer(charStreams);
         return lexer;
     }
-    public createParserFromLexer(lexer: Lexer): PostgreSQLParser {
-        const tokenStream = new CommonTokenStream(lexer);
+
+    public createParserFromTokenStream(tokenStream) {
         return new PostgreSQLParser(tokenStream);
     }
+
+    public preferredRules: Set<number> = new Set();
+
+    protected get splitListener() {
+        return null as any;
+    }
+
+    public processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number
+    ): Suggestions<Token> {
+        return {
+            syntax: [],
+            keywords: [],
+        };
+    }
 }

src/parser/plsql.ts

@@ -1,17 +1,34 @@
-import { CharStreams, CommonTokenStream, Lexer } from 'antlr4ts';
-import BasicParser from './common/basicParser';
+import { Token } from 'antlr4ts';
+import { CandidatesCollection } from 'antlr4-c3';
 import { PlSqlLexer } from '../lib/plsql/PlSqlLexer';
-import { PlSqlParser } from '../lib/plsql/PlSqlParser';
+import { PlSqlParser, ProgramContext } from '../lib/plsql/PlSqlParser';
+import BasicParser from './common/basicParser';
+import { Suggestions } from './common/basic-parser-types';

-export default class PLSQLParser extends BasicParser {
-    public createLexer(input: string): PlSqlLexer {
-        const chars = CharStreams.fromString(input.toUpperCase());
-        const lexer = new PlSqlLexer(chars);
+export default class PLSQL extends BasicParser<PlSqlLexer, ProgramContext, PlSqlParser> {
+    public createLexerFormCharStream(charStreams) {
+        const lexer = new PlSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromLexer(lexer: Lexer): PlSqlParser {
-        const tokenStream = new CommonTokenStream(lexer);
+
+    public createParserFromTokenStream(tokenStream) {
         return new PlSqlParser(tokenStream);
     }
+
+    public preferredRules: Set<number> = new Set();
+
+    protected get splitListener() {
+        return null as any;
+    }
+
+    public processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number
+    ): Suggestions<Token> {
+        return {
+            syntax: [],
+            keywords: [],
+        };
+    }
 }

src/parser/spark.ts

@@ -1,16 +1,34 @@
-import { CharStreams, CommonTokenStream } from 'antlr4ts';
-import BasicParser from './common/basicParser';
+import { Token } from 'antlr4ts';
+import { CandidatesCollection } from 'antlr4-c3';
 import { SparkSqlLexer } from '../lib/spark/SparkSqlLexer';
-import { SparkSqlParser } from '../lib/spark/SparkSqlParser';
+import { SparkSqlParser, ProgramContext } from '../lib/spark/SparkSqlParser';
+import BasicParser from './common/basicParser';
+import { Suggestions } from './common/basic-parser-types';

-export default class SparkSQL extends BasicParser {
-    public createLexer(input: string): SparkSqlLexer {
-        const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
-        const lexer = new SparkSqlLexer(chars);
+export default class SparkSQL extends BasicParser<SparkSqlLexer, ProgramContext, SparkSqlParser> {
+    public createLexerFormCharStream(charStreams) {
+        const lexer = new SparkSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromLexer(lexer: SparkSqlLexer): SparkSqlParser {
-        const tokenStream = new CommonTokenStream(lexer);
+
+    public createParserFromTokenStream(tokenStream) {
         return new SparkSqlParser(tokenStream);
     }
+
+    public preferredRules: Set<number> = new Set();
+
+    protected get splitListener() {
+        return null as any;
+    }
+
+    public processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number
+    ): Suggestions<Token> {
+        return {
+            syntax: [],
+            keywords: [],
+        };
+    }
 }

src/parser/trinosql.ts

@@ -1,17 +1,36 @@
-import { CharStreams, CommonTokenStream, Lexer } from 'antlr4ts';
+import { Token } from 'antlr4ts';
+import { CandidatesCollection } from 'antlr4-c3';
 import { TrinoSqlLexer } from '../lib/trinosql/TrinoSqlLexer';
-import { TrinoSqlParser } from '../lib/trinosql/TrinoSqlParser';
+import { TrinoSqlParser, ProgramContext } from '../lib/trinosql/TrinoSqlParser';
 import BasicParser from './common/basicParser';
+import { Suggestions } from './common/basic-parser-types';

-export default class trinoSQL extends BasicParser {
-    public createLexer(input: string): TrinoSqlLexer {
-        const chars = CharStreams.fromString(input.toUpperCase()); // Some Lexer only support uppercase token, So you need transform
-        const lexer = new TrinoSqlLexer(chars);
+export default class TrinoSQL extends BasicParser<TrinoSqlLexer, ProgramContext, TrinoSqlParser> {
+    public createLexerFormCharStream(charStreams) {
+        const lexer = new TrinoSqlLexer(charStreams);
         return lexer;
     }
-    public createParserFromLexer(lexer: Lexer): TrinoSqlParser {
-        const tokens = new CommonTokenStream(lexer);
-        const parser = new TrinoSqlParser(tokens);
+
+    public createParserFromTokenStream(tokenStream) {
+        const parser = new TrinoSqlParser(tokenStream);
         return parser;
     }
+
+    protected get splitListener() {
+        return null as any;
+    }
+
+    public preferredRules: Set<number> = new Set();
+
+    public processCandidates(
+        candidates: CandidatesCollection,
+        allTokens: Token[],
+        caretTokenIndex: number
+    ): Suggestions<Token> {
+        return {
+            syntax: [],
+            keywords: [],
+        };
+    }
 }

src/utils/findCaretTokenIndex.ts

@@ -0,0 +1,33 @@
import { Token } from "antlr4ts";
import { CaretPosition } from "../../src/parser/common/basic-parser-types";

/**
 * find the token index via caret position (cursor position)
 * @param caretPosition caret position
 * @param allTokens all the tokens
 * @returns caretTokenIndex, or null if the caret is not inside any token
 */
export function findCaretTokenIndex(caretPosition: CaretPosition, allTokens: Token[]) {
    const { lineNumber: caretLine, column: caretCol } = caretPosition;
    let left = 0;
    let right = allTokens.length - 1;

    // binary search over tokens ordered by (line, column)
    while (left <= right) {
        const mid = left + ((right - left) >> 1);
        const token = allTokens[mid];
        if (token.line > caretLine || (
            token.line === caretLine
            && token.charPositionInLine + 1 >= caretCol
        )) {
            right = mid - 1;
        } else if (token.line < caretLine || (
            token.line === caretLine
            && token.charPositionInLine + token.text.length + 1 < caretCol
        )) {
            left = mid + 1;
        } else {
            return allTokens[mid].tokenIndex;
        }
    }
    return null;
}
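The helper binary-searches the token list by line and column, so callers never scan every token. A sketch of how it is fed in practice, using the parser's getAllTokens (import paths assumed relative to src/utils):

import FlinkSQL from '../parser/flinksql'; // assumed relative import
import { findCaretTokenIndex } from './findCaretTokenIndex';

const parser = new FlinkSQL();
const allTokens = parser.getAllTokens('SELECT * FROM cat.db');

// caret right behind "cat" -> the index of the `cat` token;
// null if the caret sits on whitespace outside any token
const caretTokenIndex = findCaretTokenIndex({ lineNumber: 1, column: 18 }, allTokens);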

test/parser/flinksql/suggestion/fixtures/syntaxSuggestion.sql

@@ -0,0 +1,11 @@
INSERT INTO cat.db.tb

SELECT * FROM cat.db

CREATE TABLE cat.db ;

SHOW TABLES FROM cat

ALTER DATABASE cat.

USE DATABASE cat.

test/parser/flinksql/suggestion/fixtures/tokenSuggestion.sql

@@ -0,0 +1,7 @@
SELECT * FROM aa.bb;

USE
;
CREATE
;
SHOW

test/parser/flinksql/suggestion/syntaxSuggestion.test.ts

@@ -0,0 +1,90 @@
import fs from 'fs';
import path from 'path';
import { CaretPosition, SyntaxContextType } from '../../../../src/parser/common/basic-parser-types';
import FlinkSQL from '../../../../src/parser/flinksql';

const syntaxSql = fs.readFileSync(path.join(__dirname, 'fixtures', 'syntaxSuggestion.sql'), 'utf-8');

describe('Flink SQL Syntax Suggestion', () => {
    const parser = new FlinkSQL();

    test('Validate Syntax SQL', () => {
        // called three times on purpose: repeated invocations exercise the parse cache
        expect(parser.validate(syntaxSql).length).not.toBe(0);
        expect(parser.validate(syntaxSql).length).not.toBe(0);
        expect(parser.validate(syntaxSql).length).not.toBe(0);
    });

    test('Insert table', () => {
        const pos: CaretPosition = {
            lineNumber: 1,
            column: 22,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax?.[0];

        expect(suggestion?.syntaxContextType === SyntaxContextType.TABLE).toBe(true);
        expect(suggestion?.wordRanges.map(token => token.text))
            .toEqual(['cat', '.', 'db', '.', 'tb']);
    });

    test('Select table', () => {
        const pos: CaretPosition = {
            lineNumber: 3,
            column: 21,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax?.[0];

        expect(suggestion?.syntaxContextType === SyntaxContextType.TABLE).toBe(true);
        expect(suggestion?.wordRanges.map(token => token.text))
            .toEqual(['cat', '.', 'db']);
    });

    test('Create table', () => {
        const pos: CaretPosition = {
            lineNumber: 5,
            column: 20,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax?.[0];

        expect(suggestion?.syntaxContextType === SyntaxContextType.TABLE_CREATE).toBe(true);
        expect(suggestion?.wordRanges.map(token => token.text))
            .toEqual(['cat', '.', 'db']);
    });

    test('Show tables from', () => {
        const pos: CaretPosition = {
            lineNumber: 7,
            column: 21,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax?.[0];

        expect(suggestion?.syntaxContextType === SyntaxContextType.TABLE).toBe(true);
        expect(suggestion?.wordRanges.map(token => token.text))
            .toEqual(['cat']);
    });

    test('Alter database', () => {
        const pos: CaretPosition = {
            lineNumber: 9,
            column: 20,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax?.[0];

        expect(suggestion?.syntaxContextType === SyntaxContextType.DATABASE).toBe(true);
        expect(suggestion?.wordRanges.map(token => token.text))
            .toEqual(['cat', '.']);
    });

    test('Use database', () => {
        // caret duplicates the 'Alter database' case; the fixture's USE DATABASE statement is on line 11
        const pos: CaretPosition = {
            lineNumber: 9,
            column: 20,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax?.[0];

        expect(suggestion?.syntaxContextType === SyntaxContextType.DATABASE).toBe(true);
        expect(suggestion?.wordRanges.map(token => token.text))
            .toEqual(['cat', '.']);
    });
});

test/parser/flinksql/suggestion/tokenSuggestion.test.ts

@@ -0,0 +1,57 @@
import fs from 'fs';
import path from 'path';
import { CaretPosition } from '../../../../src/parser/common/basic-parser-types';
import FlinkSQL from '../../../../src/parser/flinksql';

const tokenSql = fs.readFileSync(path.join(__dirname, 'fixtures', 'tokenSuggestion.sql'), 'utf-8');

describe('Flink SQL Token Suggestion', () => {
    const parser = new FlinkSQL();

    test('Use Statement', () => {
        const pos: CaretPosition = {
            lineNumber: 3,
            column: 5,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(tokenSql, pos)?.keywords;

        expect(suggestion).toEqual(['MODULES', 'CATALOG']);
    });

    test('Create Statement', () => {
        const pos: CaretPosition = {
            lineNumber: 5,
            column: 8,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(tokenSql, pos)?.keywords;

        expect(suggestion).toEqual(['CATALOG', 'FUNCTION', 'TEMPORARY', 'VIEW', 'DATABASE', 'TABLE']);
    });

    test('Show Statement', () => {
        const pos: CaretPosition = {
            lineNumber: 7,
            column: 6,
        };
        const suggestion = parser.getSuggestionAtCaretPosition(tokenSql, pos)?.keywords;

        expect(suggestion).toEqual([
            'MODULES',
            'FULL',
            'FUNCTIONS',
            'USER',
            'CREATE',
            'COLUMNS',
            'TABLES',
            'CURRENT',
            'CATALOGS',
            'DATABASES',
            'JARS',
            'VIEWS',
        ]);
    });
});

yarn.lock

@@ -961,14 +961,21 @@ ansi-styles@^5.0.0:
   resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b"
   integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==

+antlr4-c3@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.npmmirror.com/antlr4-c3/-/antlr4-c3-3.0.1.tgz#e9b6ba7b796e7afc35630ffa6fac4d915f152c99"
+  integrity sha512-Vbizas0WK4Id6l1f48ANYHFZgfzmj82LZx4OuB/a87vCpyoUwofrlJ+sCiL1qT/D2SWf8HEcBPCsgskdx8NnzQ==
+  dependencies:
+    antlr4ts "0.5.0-alpha.4"
+
 antlr4ts-cli@^0.5.0-alpha.4:
   version "0.5.0-alpha.4"
   resolved "https://registry.yarnpkg.com/antlr4ts-cli/-/antlr4ts-cli-0.5.0-alpha.4.tgz#f3bfc37f10131e78d7b981c397a2aaa0450b67f6"
   integrity sha512-lVPVBTA2CVHRYILSKilL6Jd4hAumhSZZWA7UbQNQrmaSSj7dPmmYaN4bOmZG79cOy0lS00i4LY68JZZjZMWVrw==

-antlr4ts@^0.5.0-alpha.4:
+antlr4ts@0.5.0-alpha.4, antlr4ts@^0.5.0-alpha.4:
   version "0.5.0-alpha.4"
-  resolved "https://registry.yarnpkg.com/antlr4ts/-/antlr4ts-0.5.0-alpha.4.tgz#71702865a87478ed0b40c0709f422cf14d51652a"
+  resolved "https://registry.npmmirror.com/antlr4ts/-/antlr4ts-0.5.0-alpha.4.tgz#71702865a87478ed0b40c0709f422cf14d51652a"
   integrity sha512-WPQDt1B74OfPv/IMS2ekXAKkTZIHl88uMetg6q3OTqgFxZ/dxDXI0EWLyZid/1Pe6hTftyg5N7gel5wNAGxXyQ==

 anymatch@^3.0.3: