Skip to content

Commit 978102f

Browse files
committed
Drift files: Support annotations in comments
Closes #3057
1 parent 968705a commit 978102f

9 files changed

Lines changed: 180 additions & 100 deletions

File tree

docs/content/sql_api/drift_files.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,17 @@ what we got:
4242
above, that's the `AllTodosResult` class, which contains all fields from
4343
`todos` and the description of the associated category.
4444

45+
!!! tip "Comment syntax for better editor support"
46+
47+
While drift files are mostly just SQL, `import` statements and names for
48+
statements prevent some editors from recognizing that.
49+
Starting from drift 2.32, you can also write drift files using comments:
50+
51+
<Snippet href="/lib/src/snippets/drift_files/tables_comments.drift" name="(full)" />
52+
53+
Configuring your editor to treat drift files as SQL text then allows syntax highlighting
54+
and formatting tools to interpret them correctly.
55+
4556
## Variables
4657

4758
Inside of named queries, you can use variables just like you would expect with

docs/lib/src/generated_snippets.dart

Lines changed: 2 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
-- import 'tables.drift';
2+
3+
-- we can put named SQL queries in here as well:
4+
-- createEntry:
5+
INSERT INTO todos (title, content) VALUES (:title, :content);
6+
7+
-- deleteById:
8+
DELETE FROM todos WHERE id = :id;
9+
10+
-- allTodos:
11+
SELECT * FROM todos;

sqlparser/CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
- Refactor the different `parse` methods on `SqlEngine` into a single one
44
taking a `ParserEntrypoint` enum.
5+
- In drift files, allow reading imports and statement metadata from comments.
56

67
## 0.43.1
78

sqlparser/lib/src/engine/sql_engine.dart

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ import 'package:sqlparser/sqlparser.dart';
66
import 'package:sqlparser/src/reader/parser.dart';
77
import 'package:sqlparser/src/reader/tokenizer/scanner.dart';
88

9+
import '../reader/tokenizer/token_source.dart';
910
import 'autocomplete/engine.dart';
1011
import 'builtin_tables.dart';
1112

@@ -112,10 +113,10 @@ final class SqlEngine {
112113
return scope;
113114
}
114115

115-
ScannerTokenSource _prepareScanning(FileSpan source) {
116+
TokenSource _prepareScanning(FileSpan source) {
116117
final scanner =
117118
Scanner(source, scanDriftTokens: options.useDriftExtensions);
118-
return ScannerTokenSource(scanner);
119+
return TokenSource(scanner);
119120
}
120121

121122
/// Tokenizes the [source] into a list of [Token]s. Each [Token] contains
@@ -129,12 +130,7 @@ final class SqlEngine {
129130
/// you need to filter them. When using the methods in this class, this will
130131
/// be taken care of automatically.
131132
List<Token> tokenize(FileSpan source) {
132-
final scanner = _prepareScanning(source);
133-
// Read until end
134-
Token token;
135-
do {
136-
token = scanner.readToken();
137-
} while (token.type != TokenType.eof);
133+
final scanner = _prepareScanning(source)..readTokensUntilEnd();
138134

139135
final errors = scanner.scanner.errors;
140136
if (errors.isNotEmpty) {

sqlparser/lib/src/reader/parser.dart

Lines changed: 18 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ extension Parser on ParserState {
6363
bool get enableDriftExtensions => options.useDriftExtensions;
6464

6565
void _suggestHint(HintDescription description) {
66-
autoComplete?.addHint(Hint(tokens.previous, description));
66+
autoComplete?.addHint(Hint(tokens.lastConsumedToken, description));
6767
}
6868

6969
void _suggestHintForTokens(Iterable<TokenType> types) {
@@ -81,7 +81,7 @@ extension Parser on ParserState {
8181
bool get _isAtEnd => _peek.type == TokenType.eof;
8282
Token get _peek => tokens.lookahead1();
8383

84-
Token get _previous => tokens.previous!;
84+
Token get _previous => tokens.lastConsumedToken!;
8585

8686
bool _match(Iterable<TokenType> types) {
8787
if (_reportAutoComplete) _suggestHintForTokens(types);
@@ -114,14 +114,14 @@ extension Parser on ParserState {
114114
/// "NOT" followed by [type]. Does not consume any tokens.
115115
bool _checkWithNot(TokenType type) {
116116
if (_check(type)) return true;
117-
final (peek, next) = tokens.lookahead2();
117+
final (peek, next) = tokens.keywordLookahead2();
118118
return peek.type == TokenType.not && next?.type == type;
119119
}
120120

121121
/// Like [_checkWithNot], but with more than one token type.
122122
bool _checkAnyWithNot(List<TokenType> types) {
123123
if (types.any(_check)) return true;
124-
final (peek, next) = tokens.lookahead2();
124+
final (peek, next) = tokens.keywordLookahead2();
125125
return peek.type == TokenType.not && types.contains(next?.type);
126126
}
127127

@@ -313,16 +313,20 @@ extension Parser on ParserState {
313313
}
314314

315315
DriftFile driftFile() {
316+
tokens.scanner.isInTopLevelDriftFile = true;
316317
final first = _peek;
317-
final foundComponents = <PartOfDriftFile?>[];
318+
final foundComponents = <PartOfDriftFile>[];
318319

319-
while (!_isAtEnd) {
320-
foundComponents.add(_parseAsStatement(_partOfDriftFile));
321-
}
320+
while (true) {
321+
tokens.scanner.isInTopLevelDriftFile = true;
322+
if (_isAtEnd) break;
322323

323-
foundComponents.removeWhere((c) => c == null);
324+
if (_parseAsStatement(_partOfDriftFile) case final component?) {
325+
foundComponents.add(component);
326+
}
327+
}
324328

325-
final file = DriftFile(foundComponents.cast());
329+
final file = DriftFile(foundComponents);
326330
if (foundComponents.isNotEmpty) {
327331
file.setSpan(first, _previous);
328332
} else {
@@ -334,6 +338,9 @@ extension Parser on ParserState {
334338
}
335339

336340
PartOfDriftFile _partOfDriftFile() {
341+
_peek;
342+
tokens.scanner.isInTopLevelDriftFile = false;
343+
337344
final found = _import() ?? _create() ?? _declaredStatement();
338345

339346
if (found != null) {
@@ -3070,7 +3077,7 @@ final class _ExpressionParser extends ParserState {
30703077

30713078
if (_peek is KeywordToken) {
30723079
// Improve error messages for possible function calls, https://github.com/simolus3/drift/discussions/2277
3073-
final (_, next) = tokens.lookahead2();
3080+
final (_, next) = tokens.keywordLookahead2();
30743081
if (next?.type == TokenType.leftParen) {
30753082
_error(
30763083
'Expected an expression here, but got a reserved keyword. Did you '

sqlparser/lib/src/reader/tokenizer/scanner.dart

Lines changed: 27 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -4,42 +4,8 @@ import 'package:charcode/charcode.dart';
44
import 'package:source_span/source_span.dart';
55

66
import 'token.dart';
7-
import 'token_source.dart';
87
import 'utils.dart';
98

10-
final class ScannerTokenSource extends TokenSource {
11-
final List<Token> tokens = [];
12-
final Scanner scanner;
13-
14-
ScannerTokenSource(this.scanner);
15-
16-
int _index = 0;
17-
Token? _previous;
18-
19-
@override
20-
Token readToken() {
21-
while (true) {
22-
final read = scanner.scanToken();
23-
if (read is TokenizerError) {
24-
scanner.errors.add(read);
25-
continue;
26-
}
27-
28-
read
29-
..index = _index++
30-
..previous = _previous;
31-
_previous?.next = read;
32-
_previous = read;
33-
tokens.add(read);
34-
if (read.invisibleToParser) {
35-
continue;
36-
}
37-
38-
return read;
39-
}
40-
}
41-
}
42-
439
class Scanner {
4410
final Uint16List _charCodes;
4511
final List<TokenizerError> errors = [];
@@ -48,6 +14,8 @@ class Scanner {
4814
final bool scanDriftTokens;
4915
final SourceFile _file;
5016

17+
bool isInTopLevelDriftFile = false;
18+
5119
/// Pending opening tokens used to associate them with closing tokens.
5220
///
5321
/// This used to pair matching parentheses, as the information can be used by
@@ -520,13 +488,28 @@ class Scanner {
520488

521489
/// Scans a line comment after the -- has already been read.
522490
CommentToken _lineComment() {
523-
final contentBuilder = StringBuffer();
524-
while (!_isAtEnd && _peek() != $lf) {
525-
contentBuilder.writeCharCode(_nextChar());
491+
var nextLineBreak = _charCodes.indexOf($lf, _currentOffset);
492+
if (nextLineBreak == -1) {
493+
nextLineBreak = _endOffset;
526494
}
527495

528-
return CommentToken(
529-
CommentMode.line, contentBuilder.toString(), _currentSpan);
496+
final content = String.fromCharCodes(
497+
_charCodes.getRange(_currentOffset, nextLineBreak));
498+
if (scanDriftTokens && isInTopLevelDriftFile) {
499+
// We can parse line comments as import statements or named statements.
500+
// We currently use fairly crude heuristics for this: The structures we
501+
// want to parse end with colons or semicolons, so we'll parse those if
502+
// the comment is at the right location.
503+
if (_importComment.hasMatch(content) ||
504+
_statementMeta.hasMatch(content)) {
505+
// End the comment token without consuming the line. This will treat the
506+
// initial `--` as a comment and allow us to parse the rest.
507+
return CommentToken(CommentMode.line, '', _currentSpan);
508+
}
509+
}
510+
511+
_currentOffset = nextLineBreak;
512+
return CommentToken(CommentMode.line, content, _currentSpan);
530513
}
531514

532515
/// Scans a /* ... */ comment after the first /* has already been read.
@@ -550,4 +533,9 @@ class Scanner {
550533
return CommentToken(
551534
CommentMode.cStyle, contentBuilder.toString(), _currentSpan);
552535
}
536+
537+
static final _importComment = RegExp(r'^\s*import.*;', caseSensitive: false);
538+
// match `foo:` or `myQuery (:variable AS TEXT):`
539+
static final _statementMeta =
540+
RegExp(r'^\s*\w+\s*(?:\(.*\)\s*)?:', caseSensitive: false);
553541
}
Lines changed: 46 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,39 +1,48 @@
11
import 'package:meta/meta.dart';
22

3+
import 'scanner.dart';
34
import 'token.dart';
45

56
@internal
6-
abstract base class TokenSource {
7-
Token? _lastConsumedToken;
8-
7+
final class TokenSource {
8+
/// A list of tokens that have been scanned.
9+
///
10+
/// Note that this just collects tokens obtained through [readToken] and thus
11+
/// doesn't include potential tokens of source regions that haven't been
12+
/// parsed yet.
13+
final List<Token> tokens = [];
14+
final Scanner scanner;
15+
16+
int _index = 0;
17+
Token? _previous;
18+
Token? lastConsumedToken;
919
// Up to two tokens of lookahead is used by the parser.
1020
Token? _pendingLookahead0;
1121
Token? _pendingLookahead1;
1222

13-
Token? get previous => _lastConsumedToken;
14-
15-
TokenSource();
16-
17-
factory TokenSource.fromIterator(Iterator<Token> tokens) {
18-
return _IteratorTokenSource(tokens);
19-
}
23+
TokenSource(this.scanner);
2024

21-
Token lookahead1() {
25+
Token lookahead1({bool treatCommentAsDriftAnnotation = false}) {
2226
if (_pendingLookahead0 case final token?) {
2327
return token;
2428
} else {
25-
return _pendingLookahead0 = readToken();
29+
return _pendingLookahead0 = _readToken();
2630
}
2731
}
2832

29-
(Token, Token?) lookahead2() {
33+
/// Returns the next two tokens, but only if the next token is a keyword
34+
/// token.
35+
///
36+
/// This special case is used to parse `NOT IN`, `NOT BETWEEN` and similar
37+
/// structures.
38+
(Token, Token?) keywordLookahead2() {
3039
final peek = lookahead1();
31-
if (peek.type == TokenType.eof) {
40+
if (peek is! KeywordToken) {
3241
return (peek, null);
3342
}
3443

3544
final next = switch (_pendingLookahead1) {
36-
null => _pendingLookahead1 = readToken(),
45+
null => _pendingLookahead1 = _readToken(),
3746
final pending => pending,
3847
};
3948
return (peek, next);
@@ -43,36 +52,40 @@ abstract base class TokenSource {
4352
if (_pendingLookahead0 case final token?) {
4453
_pendingLookahead0 = _pendingLookahead1;
4554
_pendingLookahead1 = null;
46-
return _lastConsumedToken = token;
55+
return lastConsumedToken = token;
4756
} else {
4857
// We should have rolled this into pendingLookahead0 when consuming.
4958
assert(_pendingLookahead1 == null);
50-
return _lastConsumedToken = readToken();
59+
return lastConsumedToken = _readToken();
5160
}
5261
}
5362

54-
@protected
55-
Token readToken();
56-
}
57-
58-
final class _IteratorTokenSource extends TokenSource {
59-
final Iterator<Token> _tokens;
60-
61-
_IteratorTokenSource(this._tokens);
62-
63-
@override
64-
Token readToken() {
63+
Token _readToken() {
6564
while (true) {
66-
if (!_tokens.moveNext()) {
67-
throw StateError('Called readTokens past eof');
65+
final read = scanner.scanToken();
66+
if (read is TokenizerError) {
67+
scanner.errors.add(read);
68+
continue;
6869
}
6970

70-
final token = _tokens.current;
71-
if (token.invisibleToParser) {
71+
read
72+
..index = _index++
73+
..previous = _previous;
74+
_previous?.next = read;
75+
_previous = read;
76+
tokens.add(read);
77+
if (read.invisibleToParser) {
7278
continue;
7379
}
7480

75-
return token;
81+
return read;
7682
}
7783
}
84+
85+
void readTokensUntilEnd() {
86+
Token token;
87+
do {
88+
token = _readToken();
89+
} while (token.type != TokenType.eof);
90+
}
7891
}

0 commit comments

Comments
 (0)