feat: deprecate legacy APIs (#304)

* feat: deprecation of legacy APIs

* feat: deprecate plsql language
Hayden authored 2024-04-28 11:41:13 +08:00, committed by GitHub
parent a5387e4729
commit f1c3bbe17c
23 changed files with 1 addition and 248251 deletions
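For context, the user-facing shape of this change, as a minimal sketch (the import path 'dt-sql-parser' is assumed; only the export names come from the diffs below):

// Before this commit, PLSQL and the legacy utils were part of the public API:
// import { PLSQL, legacy_splitSql } from 'dt-sql-parser';
// After it, only the seven supported dialects remain exported:
import { MySQL, FlinkSQL, SparkSQL, HiveSQL, PostgreSQL, TrinoSQL, ImpalaSQL } from 'dt-sql-parser';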

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,13 +1,4 @@
-export {
-    MySQL,
-    FlinkSQL,
-    SparkSQL,
-    HiveSQL,
-    PostgreSQL,
-    TrinoSQL,
-    ImpalaSQL,
-    PLSQL,
-} from './parser';
+export { MySQL, FlinkSQL, SparkSQL, HiveSQL, PostgreSQL, TrinoSQL, ImpalaSQL } from './parser';
 export {
     MySqlParserListener,
@@ -18,8 +9,6 @@ export {
     SparkSqlParserVisitor,
     HiveSqlParserListener,
     HiveSqlParserVisitor,
-    PlSqlParserListener,
-    PlSqlParserVisitor,
     PostgreSqlParserListener,
     PostgreSqlParserVisitor,
     TrinoSqlListener,
@@ -30,14 +19,6 @@ export {
 export { EntityContextType } from './parser/common/types';
-export {
-    /**
-     * @deprecated SyntaxContextType has been renamed to {@link EntityContextType},
-     * It will be removed when the stable version is released.
-     */
-    EntityContextType as SyntaxContextType,
-} from './parser/common/types';
 export { StmtContextType } from './parser/common/entityCollector';
 export type { CaretPosition, Suggestions, SyntaxSuggestion } from './parser/common/types';
@@ -47,8 +28,3 @@ export type { WordRange, TextSlice } from './parser/common/textAndWord';
 export type { SyntaxError, ParseError, ErrorListener } from './parser/common/parseErrorListener';
 export type { StmtContext, EntityContext } from './parser/common/entityCollector';
-/**
- * @deprecated Legacy utils will be removed when the stable version is released.
- */
-export * from './utils';
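Since the removed alias pointed at the same enum, consumers only need an import rename; a minimal sketch (the 'dt-sql-parser' import path is assumed, not shown in this diff):

// was: import { SyntaxContextType } from 'dt-sql-parser';
import { EntityContextType } from 'dt-sql-parser';
// Call sites change in name only: SyntaxContextType.<member> becomes EntityContextType.<member>.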

View File

@@ -7,9 +7,6 @@ export { MySqlParserVisitor } from './mysql/MySqlParserVisitor';
 export { HiveSqlParserListener } from './hive/HiveSqlParserListener';
 export { HiveSqlParserVisitor } from './hive/HiveSqlParserVisitor';
-export { PlSqlParserListener } from './plsql/PlSqlParserListener';
-export { PlSqlParserVisitor } from './plsql/PlSqlParserVisitor';
 export { SparkSqlParserListener } from './spark/SparkSqlParserListener';
 export { SparkSqlParserVisitor } from './spark/SparkSqlParserVisitor';

View File

@@ -1,8 +0,0 @@
-import { Lexer } from "antlr4ng";
-
-export abstract class PlSqlBaseLexer extends Lexer {
-    IsNewlineAtPos(pos: number): boolean {
-        const la = this._input.LA(pos);
-        return la == -1 || String.fromCharCode(la) == '\n';
-    }
-}
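The deleted base lexer leaned on the antlr4ng lookahead contract: LA(n) returns the code point n characters ahead, or -1 at end of input. A minimal sketch of that contract (assuming antlr4ng's CharStream.fromString factory):

import { CharStream } from "antlr4ng";

const stream = CharStream.fromString("a\n");
stream.LA(1); // 97 -> 'a'
stream.LA(2); // 10 -> '\n'
stream.LA(3); // -1 -> end of input, which IsNewlineAtPos also treated as a newline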

View File

@@ -1,29 +0,0 @@
-import { TokenStream } from "antlr4ng";
-import { SQLParserBase } from "../SQLParserBase";
-
-export abstract class PlSqlBaseParser extends SQLParserBase {
-    private _isVersion10: boolean = false;
-    private _isVersion12: boolean = true;
-
-    constructor(input: TokenStream) {
-        super(input);
-        this._isVersion10 = false;
-        this._isVersion12 = true;
-    }
-
-    isVersion10(): boolean {
-        return this._isVersion10;
-    }
-
-    isVersion12(): boolean {
-        return this._isVersion12;
-    }
-
-    setVersion10(value: boolean): void {
-        this._isVersion10 = value;
-    }
-
-    setVersion12(value: boolean): void {
-        this._isVersion12 = value;
-    }
-}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,5 +1,4 @@
 export { MySQL } from './mysql';
-export { PLSQL } from './plsql';
 export { HiveSQL } from './hive';
 export { FlinkSQL } from './flink';
 export { SparkSQL } from './spark';
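There is no drop-in replacement for the removed PLSQL class; the remaining dialects keep the same BasicSQL surface. A minimal usage sketch (import path assumed; validate is BasicSQL's error-checking entry point, not shown in this diff):

import { MySQL } from 'dt-sql-parser';

const parser = new MySQL();
const errors = parser.validate('SELECT 1;'); // [] when the input parses cleanly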

View File

@@ -1,37 +0,0 @@
-import { CharStream, CommonTokenStream, Token } from 'antlr4ng';
-import { CandidatesCollection } from 'antlr4-c3';
-import { PlSqlLexer } from '../lib/plsql/PlSqlLexer';
-import { PlSqlParser, ProgramContext } from '../lib/plsql/PlSqlParser';
-import { BasicSQL } from './common/basicSQL';
-import { Suggestions } from './common/types';
-
-export class PLSQL extends BasicSQL<PlSqlLexer, ProgramContext, PlSqlParser> {
-    protected createLexerFromCharStream(charStreams: CharStream) {
-        return new PlSqlLexer(charStreams);
-    }
-
-    protected createParserFromTokenStream(tokenStream: CommonTokenStream) {
-        return new PlSqlParser(tokenStream);
-    }
-
-    protected preferredRules: Set<number> = new Set();
-
-    protected get splitListener() {
-        return null as any;
-    }
-
-    protected createEntityCollector(input: string, caretTokenIndex?: number) {
-        return null as any;
-    }
-
-    protected processCandidates(
-        candidates: CandidatesCollection,
-        allTokens: Token[],
-        caretTokenIndex: number
-    ): Suggestions<Token> {
-        return {
-            syntax: [],
-            keywords: [],
-        };
-    }
-}
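Note that this class stubbed out splitListener and createEntityCollector with `null as any` and returned empty suggestions, so statement splitting, entity collection, and completion never actually worked for PL/SQL. The supported dialects implement the full pipeline; a hedged sketch (import path assumed; method names are from BasicSQL's public API):

import { FlinkSQL } from 'dt-sql-parser';

const flink = new FlinkSQL();
// Statement splitting and caret suggestions, which PLSQL's stubs could not provide:
const statements = flink.splitSQLByStatement('SELECT 1; SELECT 2;');
const suggestions = flink.getSuggestionAtCaretPosition('SELECT * FROM ', {
    lineNumber: 1,
    column: 15,
});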

View File

@@ -1,202 +0,0 @@
-import { Legacy_TokenType, Legacy_Token, Legacy_TokenReg } from './token';
-
-/**
- * @param {String} sql
- * @deprecated use parser.createLexer() instead.
- */
-function legacy_lexer(input: string): Legacy_Token[] {
-    let current = 0;
-    let line = 1;
-    const tokens: Legacy_Token[] = [];
-
-    const extract = (
-        currentChar: string,
-        validator: RegExp,
-        TokenType: Legacy_TokenType
-    ): Legacy_Token => {
-        let value = '';
-        const start = current;
-        while (validator.test(currentChar)) {
-            value += currentChar;
-            currentChar = input[++current];
-        }
-        return {
-            type: TokenType,
-            start: start,
-            end: current,
-            lineNumber: line,
-            value: value,
-        };
-    };
-
-    const matchFunction = () => {
-        const bracketNum = [current];
-        for (let i = current + 1; i < input.length; i++) {
-            const currentChar = input[i];
-            if (currentChar === '\n') {
-                line++;
-            }
-            if (Legacy_TokenReg.LeftSmallBracket.test(currentChar)) {
-                bracketNum.push(i);
-            }
-            if (Legacy_TokenReg.RightSmallBracket.test(currentChar)) {
-                const start = bracketNum.pop();
-                const end = i + 1;
-                if (bracketNum.length === 0) {
-                    current = end;
-                    tokens.push({
-                        type: Legacy_TokenType.FunctionArguments,
-                        value: input.slice(start, end),
-                        start,
-                        lineNumber: line,
-                        end,
-                    });
-                    return;
-                }
-            }
-        }
-    };
-
-    const matchQuotation = (
-        currentChar: string,
-        validator: RegExp,
-        TokenType: Legacy_TokenType
-    ) => {
-        do {
-            if (currentChar === '\n') {
-                line++;
-            }
-            currentChar = input[++current];
-        } while (!validator.test(currentChar));
-        ++current;
-    };
-
-    while (current < input.length) {
-        let char = input[current];
-        if (char === '\n') {
-            line++;
-            current++;
-            continue;
-        }
-        if (Legacy_TokenReg.LeftSmallBracket.test(char)) {
-            matchFunction();
-            continue;
-        }
-        if (Legacy_TokenReg.BackQuotation.test(char)) {
-            matchQuotation(char, Legacy_TokenReg.BackQuotation, Legacy_TokenType.BackQuotation);
-            continue;
-        }
-        if (Legacy_TokenReg.SingleQuotation.test(char)) {
-            matchQuotation(char, Legacy_TokenReg.SingleQuotation, Legacy_TokenType.SingleQuotation);
-            continue;
-        }
-        if (Legacy_TokenReg.DoubleQuotation.test(char)) {
-            matchQuotation(char, Legacy_TokenReg.DoubleQuotation, Legacy_TokenType.DoubleQuotation);
-            continue;
-        }
-        if (char === '-' && input[current + 1] === '-') {
-            let value = '';
-            const start = current;
-            while (char !== '\n' && current < input.length) {
-                value += char;
-                char = input[++current];
-            }
-            tokens.push({
-                type: Legacy_TokenType.Comment,
-                value,
-                start: start,
-                lineNumber: line,
-                end: current,
-            });
-            continue;
-        }
-        if (char === '/' && input[current + 1] === '*') {
-            let value = '';
-            const start = current;
-            const startLine = line;
-            while (!(char === '/' && input[current - 1] === '*')) {
-                if (char === '\n') {
-                    line++;
-                }
-                value += char;
-                char = input[++current];
-            }
-            value += char;
-            ++current;
-            tokens.push({
-                type: Legacy_TokenType.Comment,
-                value,
-                start: start,
-                lineNumber: startLine,
-                end: current,
-            });
-            continue;
-        }
-        if (Legacy_TokenReg.StatementTerminator.test(char)) {
-            const newToken = extract(
-                char,
-                Legacy_TokenReg.StatementTerminator,
-                Legacy_TokenType.StatementTerminator
-            );
-            tokens.push(newToken);
-            continue;
-        }
-        current++;
-    }
-    return tokens;
-}
-
-/**
- * split sql
- * @param {String} sql
- * @deprecated use parser.splitSQLByStatement() instead.
- */
-function legacy_splitSql(sql: string) {
-    const tokens = legacy_lexer(sql);
-    const sqlArr = [];
-    let startIndex = 0;
-    tokens.forEach((ele: Legacy_Token) => {
-        if (ele.type === Legacy_TokenType.StatementTerminator) {
-            sqlArr.push(sql.slice(startIndex, ele.end));
-            startIndex = ele.end;
-        }
-    });
-    if (startIndex < sql.length) {
-        sqlArr.push(sql.slice(startIndex));
-    }
-    return sqlArr;
-}
-
-/**
- * clean comment
- * @param {String} sql
- * @deprecated will be removed in future.
- */
-function legacy_cleanSql(sql: string) {
-    sql = sql.trim();
-    const tokens = legacy_lexer(sql);
-    let resultSql = '';
-    let startIndex = 0;
-    tokens.forEach((ele: Legacy_Token) => {
-        if (ele.type === Legacy_TokenType.Comment) {
-            resultSql += sql.slice(startIndex, ele.start);
-            startIndex = ele.end + 1;
-        }
-    });
-    resultSql += sql.slice(startIndex);
-    return resultSql;
-}
-
-export { legacy_cleanSql, legacy_splitSql, legacy_lexer };
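The @deprecated tags above name their replacements; a hedged migration sketch (import path assumed; createLexer and splitSQLByStatement come from those tags, getAllTokens from BasicSQL's public API):

import { MySQL } from 'dt-sql-parser';

const parser = new MySQL();

// legacy_lexer(sql)    -> parser.createLexer(sql), or getAllTokens(sql) for a token array
const tokens = parser.getAllTokens('SELECT 1;');

// legacy_splitSql(sql) -> parser.splitSQLByStatement(sql)
const statements = parser.splitSQLByStatement('SELECT 1; SELECT 2;');

// legacy_cleanSql(sql) is dropped without a direct replacement; comments are
// handled by the real ANTLR lexer instead of string surgery.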

View File

@ -1,62 +0,0 @@
/**
* @deprecated will be removed in future.
*/
export enum Legacy_TokenType {
/**
* Enclosed in single/double/back quotation, `` Symbol
* 'abc', "abc", `abc`
*/
SingleQuotation = 'SingleQuotation',
DoubleQuotation = 'DoubleQuotation',
BackQuotation = 'BackQuotation',
/**
* Language element type
*/
Comment = 'Comment',
/**
* Statement
*/
StatementTerminator = 'StatementTerminator',
/**
* Others
*/
Error = 'Error',
/**
* Left small Bracket
*/
LeftSmallBracket = 'LeftSmallBracket',
/**
* Left small Bracket
*/
RightSmallBracket = 'RightSmallBracket',
Comma = 'Comma',
FunctionArguments = 'FunctionArguments',
}
/**
* @deprecated will be removed in future.
* Token object
*/
export interface Legacy_Token {
type: Legacy_TokenType;
value: string;
start?: number;
end: number;
lineNumber: number;
message?: string;
}
/**
* @deprecated will be removed in future.
* Token recognition rules
*/
export const Legacy_TokenReg = {
[Legacy_TokenType.StatementTerminator]: /[;]/,
[Legacy_TokenType.SingleQuotation]: /['|\']/,
[Legacy_TokenType.DoubleQuotation]: /["]/,
[Legacy_TokenType.BackQuotation]: /[`]/,
[Legacy_TokenType.LeftSmallBracket]: /[(]/,
[Legacy_TokenType.RightSmallBracket]: /[)]/,
[Legacy_TokenType.Comma]: /[,]/,
};
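One quirk of the deleted rules worth recording: inside a character class, | is a literal and \' is just ' again, so the SingleQuotation pattern matches the pipe character as well as the quote. A two-line check:

const singleQuotation = /['|\']/; // the legacy pattern, verbatim
console.log(singleQuotation.test("'")); // true
console.log(singleQuotation.test('|')); // true — almost certainly unintended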