-
Notifications
You must be signed in to change notification settings - Fork 390
/
dbstate.py
1048 lines (819 loc) · 33.7 KB
/
dbstate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import Any, Optional, Tuple, Iterator, Dict, List, NamedTuple, Self
import dataclasses
import enum
import io
import pickle
import time
import uuid
import immutables
from edb import errors
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb.schema import delta as s_delta
from edb.schema import migrations as s_migrations
from edb.schema import objects as s_obj
from edb.schema import schema as s_schema
from edb.server import config
from edb.server import defines
from edb.pgsql import codegen as pgcodegen
from . import enums
from . import sertypes
class TxAction(enum.IntEnum):
    """Transaction-control operations a compiled query unit can represent."""
    START = 1
    COMMIT = 2
    ROLLBACK = 3
    DECLARE_SAVEPOINT = 4
    RELEASE_SAVEPOINT = 5
    ROLLBACK_TO_SAVEPOINT = 6
class MigrationAction(enum.IntEnum):
    """Migration-block commands recognized by the compiler state machine."""
    START = 1
    POPULATE = 2
    DESCRIBE = 3
    ABORT = 4
    COMMIT = 5
    REJECT_PROPOSED = 6
@dataclasses.dataclass(frozen=True)
class BaseQuery:
    """Base class for all compiled command variants.

    Carries the backend SQL text and, optionally, the SQL used to
    persist/evict this query in the backend-side query cache.
    """
    sql: Tuple[bytes, ...]
    cache_sql: Optional[Tuple[bytes, bytes]] = dataclasses.field(
        kw_only=True, default=None
    ) # (persist, evict)

    @property
    def is_transactional(self) -> bool:
        # Subclasses that may be non-transactional shadow this property
        # with a plain field of the same name.
        return True
@dataclasses.dataclass(frozen=True)
class NullQuery(BaseQuery):
    """A compiled command that produces no backend SQL at all."""
    sql: Tuple[bytes, ...] = tuple()
    is_transactional: bool = True
    has_dml: bool = False
@dataclasses.dataclass(frozen=True)
class Query(BaseQuery):
    """A fully compiled EdgeQL query plus its I/O type descriptors."""
    sql_hash: bytes
    cardinality: enums.Cardinality
    # Binary type descriptors (and their ids) for the result and the
    # argument encodings, in the wire-protocol format.
    out_type_data: bytes
    out_type_id: bytes
    in_type_data: bytes
    in_type_id: bytes
    in_type_args: Optional[List[Param]] = None
    # (name, flag) pairs — presumably globals referenced by the query;
    # flag semantics defined by the compiler (TODO confirm at call sites).
    globals: Optional[list[tuple[str, bool]]] = None
    is_transactional: bool = True
    has_dml: bool = False
    cacheable: bool = True
    is_explain: bool = False
    query_asts: Any = None
    append_rollback: bool = False
@dataclasses.dataclass(frozen=True)
class SimpleQuery(BaseQuery):
    """A compiled query with no output descriptors."""
    sql: Tuple[bytes, ...]
    is_transactional: bool = True
    has_dml: bool = False
    # XXX: Temporary hack, since SimpleQuery will die
    in_type_args: Optional[List[Param]] = None
@dataclasses.dataclass(frozen=True)
class SessionStateQuery(BaseQuery):
    """A compiled session-state command (SET/CONFIGURE and similar)."""
    config_scope: Optional[qltypes.ConfigScope] = None
    # True when the setting maps onto a backend (Postgres) setting.
    is_backend_setting: bool = False
    # True when applying the setting requires a server restart.
    requires_restart: bool = False
    is_system_config: bool = False
    config_op: Optional[config.Operation] = None
    is_transactional: bool = True
    globals: Optional[list[tuple[str, bool]]] = None
    # Argument type descriptor, when the command takes parameters.
    in_type_data: Optional[bytes] = None
    in_type_id: Optional[bytes] = None
    in_type_args: Optional[List[Param]] = None
@dataclasses.dataclass(frozen=True)
class DDLQuery(BaseQuery):
    """A compiled DDL command and the resulting schema state."""
    # The user schema as it will be after this DDL executes.
    user_schema: s_schema.FlatSchema
    global_schema: Optional[s_schema.FlatSchema] = None
    cached_reflection: Any = None
    is_transactional: bool = True
    # Names of databases created/dropped by this command, if any.
    create_db: Optional[str] = None
    drop_db: Optional[str] = None
    drop_db_reset_connections: bool = False
    create_db_template: Optional[str] = None
    create_db_mode: Optional[qlast.BranchType] = None
    ddl_stmt_id: Optional[str] = None
    config_ops: List[config.Operation] = (
        dataclasses.field(default_factory=list))
@dataclasses.dataclass(frozen=True)
class TxControlQuery(BaseQuery):
    """A compiled transaction-control command (BEGIN/COMMIT/SAVEPOINT...)."""
    action: TxAction
    cacheable: bool
    modaliases: Optional[immutables.Map[Optional[str], str]]
    is_transactional: bool = True
    # Schema state captured for COMMIT of a DDL-bearing transaction.
    user_schema: Optional[s_schema.Schema] = None
    global_schema: Optional[s_schema.Schema] = None
    cached_reflection: Any = None
    # Savepoint name/id for the SAVEPOINT-related actions.
    sp_name: Optional[str] = None
    sp_id: Optional[int] = None
@dataclasses.dataclass(frozen=True)
class MigrationControlQuery(BaseQuery):
    """A compiled migration-block command (START/POPULATE/ABORT/...)."""
    action: MigrationAction
    # The transaction action this migration command implies, if any
    # (e.g. ABORT MIGRATION may roll back an implicit transaction).
    tx_action: Optional[TxAction]
    cacheable: bool
    modaliases: Optional[immutables.Map[Optional[str], str]]
    is_transactional: bool = True
    user_schema: Optional[s_schema.FlatSchema] = None
    cached_reflection: Any = None
    ddl_stmt_id: Optional[str] = None
@dataclasses.dataclass(frozen=True)
class MaintenanceQuery(BaseQuery):
    """A compiled maintenance command; adds nothing beyond BaseQuery."""
    is_transactional: bool = True
@dataclasses.dataclass(frozen=True)
class Param:
    """Description of a single query parameter."""
    name: str
    required: bool
    # Element type id when the parameter is passed as an array;
    # None for scalar parameters.
    array_type_id: Optional[uuid.UUID]
    outer_idx: Optional[int]
    # Nested parameter layout — presumably (type ids, sub-descriptors);
    # TODO confirm against the compiler's argument packing code.
    sub_params: Optional[tuple[list[Optional[uuid.UUID]], tuple[Any, ...]]]
#############################
@dataclasses.dataclass
class QueryUnit:
    """A single executable unit produced by the compiler.

    Instances are pickled for caching (see serialize/deserialize), so
    field layout changes affect on-disk compatibility.
    """
    sql: Tuple[bytes, ...]
    # Status-line for the compiled command; returned to front-end
    # in a CommandComplete protocol message if the command is
    # executed successfully. When a QueryUnit contains multiple
    # EdgeQL queries, the status reflects the last query in the unit.
    status: bytes
    cache_key: Optional[uuid.UUID] = None
    cache_sql: Optional[Tuple[bytes, bytes]] = None # (persist, evict)
    # Output format of this query unit
    output_format: enums.OutputFormat = enums.OutputFormat.NONE
    # Set only for units that contain queries that can be cached
    # as prepared statements in Postgres.
    sql_hash: bytes = b''
    # True if all statments in *sql* can be executed inside a transaction.
    # If False, they will be executed separately.
    is_transactional: bool = True
    # Capabilities used in this query
    capabilities: enums.Capability = enums.Capability(0)
    # True if this unit contains SET commands.
    has_set: bool = False
    # If tx_id is set, it means that the unit
    # starts a new transaction.
    tx_id: Optional[int] = None
    # True if this unit is single 'COMMIT' command.
    # 'COMMIT' is always compiled to a separate QueryUnit.
    tx_commit: bool = False
    # True if this unit is single 'ROLLBACK' command.
    # 'ROLLBACK' is always compiled to a separate QueryUnit.
    tx_rollback: bool = False
    # True if this unit is single 'ROLLBACK TO SAVEPOINT' command.
    # 'ROLLBACK TO SAVEPOINT' is always compiled to a separate QueryUnit.
    tx_savepoint_rollback: bool = False
    tx_savepoint_declare: bool = False
    # True if this unit is `ABORT MIGRATION` command within a transaction,
    # that means abort_migration and tx_rollback cannot be both True
    tx_abort_migration: bool = False
    # For SAVEPOINT commands, the name and sp_id
    sp_name: Optional[str] = None
    sp_id: Optional[int] = None
    # True if it is safe to cache this unit.
    cacheable: bool = False
    # If non-None, contains a name of the DB that is about to be
    # created/deleted. If it's the former, the IO process needs to
    # introspect the new db. If it's the later, the server should
    # close all inactive unused pooled connections to it.
    create_db: Optional[str] = None
    drop_db: Optional[str] = None
    drop_db_reset_connections: bool = False
    # If non-None, contains a name of the DB that will be used as
    # a template database to create the database. The server should
    # close all inactive unused pooled connections to the template db.
    create_db_template: Optional[str] = None
    create_db_mode: Optional[str] = None
    # If non-None, the DDL statement will emit data packets marked
    # with the indicated ID.
    ddl_stmt_id: Optional[str] = None
    # Cardinality of the result set. Set to NO_RESULT if the
    # unit represents multiple queries compiled as one script.
    cardinality: enums.Cardinality = \
        enums.Cardinality.NO_RESULT
    out_type_data: bytes = sertypes.NULL_TYPE_DESC
    out_type_id: bytes = sertypes.NULL_TYPE_ID.bytes
    in_type_data: bytes = sertypes.NULL_TYPE_DESC
    in_type_id: bytes = sertypes.NULL_TYPE_ID.bytes
    in_type_args: Optional[List[Param]] = None
    in_type_args_real_count: int = 0
    globals: Optional[list[tuple[str, bool]]] = None
    # Set only when this unit contains a CONFIGURE INSTANCE command.
    system_config: bool = False
    # Set only when this unit contains a CONFIGURE DATABASE command.
    database_config: bool = False
    # Set only when this unit contains an operation that needs to have
    # its results read back in the middle of the script.
    # (SET GLOBAL, CONFIGURE DATABASE)
    needs_readback: bool = False
    # Whether any configuration change requires a server restart
    config_requires_restart: bool = False
    # Set only when this unit contains a CONFIGURE command which
    # alters a backend configuration setting.
    backend_config: bool = False
    # Set only when this unit contains a CONFIGURE command which
    # alters a system configuration setting.
    is_system_config: bool = False
    config_ops: List[config.Operation] = (
        dataclasses.field(default_factory=list))
    modaliases: Optional[immutables.Map[Optional[str], str]] = None
    # If present, represents the future schema state after
    # the command is run. The schema is pickled.
    user_schema: Optional[bytes] = None
    # Unlike user_schema, user_schema_version usually exist, pointing to the
    # latest user schema, which is self.user_schema if changed, or the user
    # schema this QueryUnit was compiled upon.
    user_schema_version: uuid.UUID | None = None
    cached_reflection: Optional[bytes] = None
    extensions: Optional[set[str]] = None
    ext_config_settings: Optional[list[config.Setting]] = None
    # If present, represents the future global schema state
    # after the command is run. The schema is pickled.
    global_schema: Optional[bytes] = None
    roles: immutables.Map[str, immutables.Map[str, Any]] | None = None
    is_explain: bool = False
    query_asts: Any = None
    append_rollback: bool = False

    @property
    def has_ddl(self) -> bool:
        """True if this unit used the DDL capability."""
        return bool(self.capabilities & enums.Capability.DDL)

    @property
    def tx_control(self) -> bool:
        """True if this unit performs any transaction control.

        NOTE: tx_abort_migration is deliberately not included here;
        it is handled separately by the callers.
        """
        return (
            bool(self.tx_id)
            or self.tx_rollback
            or self.tx_commit
            or self.tx_savepoint_declare
            or self.tx_savepoint_rollback
        )

    def serialize(self) -> bytes:
        """Pickle this unit, prefixed with a one-byte format version."""
        rv = io.BytesIO()
        rv.write(b"\x00") # 1 byte of version number
        pickle.dump(self, rv, -1)
        return rv.getvalue()

    @classmethod
    def deserialize(cls, data: bytes) -> Self:
        """Inverse of serialize(); raises ValueError on unknown versions."""
        buf = memoryview(data)
        match buf[0]:
            case 0x00:
                return pickle.loads(buf[1:]) # type: ignore[no-any-return]
        raise ValueError(f"Bad version number: {buf[0]}")
@dataclasses.dataclass
class QueryUnitGroup:
    """A group of QueryUnits compiled from one EdgeQL script.

    Aggregate metadata (capabilities, cacheability, tx control) covers
    all appended units; the result metadata (cardinality and the type
    descriptors) always mirrors the most recently appended unit.
    """

    # All capabilities used by any query units in this group
    capabilities: enums.Capability = enums.Capability(0)
    # True if it is safe to cache this unit.
    cacheable: bool = True
    # True if any query unit has transaction control commands, like COMMIT,
    # ROLLBACK, START TRANSACTION or SAVEPOINT-related commands
    tx_control: bool = False
    # Cardinality of the result set. Set to NO_RESULT if the
    # unit group is not expected or desired to return data.
    cardinality: enums.Cardinality = enums.Cardinality.NO_RESULT
    out_type_data: bytes = sertypes.NULL_TYPE_DESC
    out_type_id: bytes = sertypes.NULL_TYPE_ID.bytes
    in_type_data: bytes = sertypes.NULL_TYPE_DESC
    in_type_id: bytes = sertypes.NULL_TYPE_ID.bytes
    in_type_args: Optional[List[Param]] = None
    in_type_args_real_count: int = 0
    globals: Optional[list[tuple[str, bool]]] = None
    # Cacheable QueryUnit is serialized in the compiler, so that the I/O server
    # doesn't need to serialize it again for persistence.
    _units: List[QueryUnit | bytes] = dataclasses.field(default_factory=list)
    # This is a I/O server-only cache for unpacked QueryUnits
    _unpacked_units: List[QueryUnit] | None = None
    state_serializer: Optional[sertypes.StateSerializer] = None

    @property
    def units(self) -> List[QueryUnit]:
        """All units in the group, lazily deserialized on first access."""
        if self._unpacked_units is None:
            self._unpacked_units = [
                QueryUnit.deserialize(unit) if isinstance(unit, bytes) else unit
                for unit in self._units
            ]
        return self._unpacked_units

    def __iter__(self) -> Iterator[QueryUnit]:
        return iter(self.units)

    def __len__(self) -> int:
        return len(self._units)

    def __getitem__(self, item: int) -> QueryUnit:
        return self.units[item]

    def maybe_get_serialized(self, item: int) -> bytes | None:
        """Return the still-serialized form of unit *item*, if any."""
        unit = self._units[item]
        if isinstance(unit, bytes):
            return unit
        return None

    def append(
        self,
        query_unit: QueryUnit,
        serialize: bool = True,
    ) -> None:
        """Add *query_unit* and fold its metadata into the group."""
        self.capabilities |= query_unit.capabilities
        if not query_unit.cacheable:
            self.cacheable = False
        if query_unit.tx_control:
            self.tx_control = True
        # Result metadata mirrors the last appended unit.
        self.cardinality = query_unit.cardinality
        self.out_type_data = query_unit.out_type_data
        self.out_type_id = query_unit.out_type_id
        self.in_type_data = query_unit.in_type_data
        self.in_type_id = query_unit.in_type_id
        self.in_type_args = query_unit.in_type_args
        self.in_type_args_real_count = query_unit.in_type_args_real_count
        if query_unit.globals is not None:
            if self.globals is None:
                self.globals = []
            self.globals.extend(query_unit.globals)
        if not serialize or query_unit.cache_sql is None:
            self._units.append(query_unit)
        else:
            self._units.append(query_unit.serialize())
        # Bug fix: keep the unpacked cache coherent.  If `units` was
        # already accessed, the cached list must receive the new unit
        # too, otherwise __iter__/__getitem__ would miss it while
        # __len__ (which counts _units) includes it.
        if self._unpacked_units is not None:
            self._unpacked_units.append(query_unit)
@dataclasses.dataclass(frozen=True, kw_only=True)
class PreparedStmtOpData:
    """Common prepared statement metadata"""
    stmt_name: str
    """Original statement name as passed by the frontend"""
    # be_stmt_name defaults to empty until the backend name is computed.
    be_stmt_name: bytes = b""
    """Computed statement name as passed to the backend"""
@dataclasses.dataclass(frozen=True, kw_only=True)
class PrepareData(PreparedStmtOpData):
    """PREPARE statement data"""
    query: str
    """Translated query string"""
    translation_data: Optional[pgcodegen.TranslationData] = None
    """Translation source map"""
@dataclasses.dataclass(frozen=True, kw_only=True)
class ExecuteData(PreparedStmtOpData):
    """EXECUTE statement data"""
    # No fields beyond the common prepared-statement metadata.
    pass
@dataclasses.dataclass(frozen=True, kw_only=True)
class DeallocateData(PreparedStmtOpData):
    """DEALLOCATE statement data"""
    # No fields beyond the common prepared-statement metadata.
    pass
@dataclasses.dataclass(kw_only=True)
class SQLQueryUnit:
    """One translated SQL statement and its execution metadata."""
    query: str
    """Translated query text."""
    orig_query: str
    """Original query text before translation."""
    translation_data: Optional[pgcodegen.TranslationData] = None
    """Translation source map."""
    fe_settings: SQLSettings
    """Frontend-only settings effective during translation of this unit."""
    tx_action: Optional[TxAction] = None
    tx_chain: bool = False
    sp_name: Optional[str] = None
    prepare: Optional[PrepareData] = None
    execute: Optional[ExecuteData] = None
    deallocate: Optional[DeallocateData] = None
    # SET command payload: setting name -> new value (None resets).
    set_vars: Optional[dict[Optional[str], Optional[str | list[str]]]] = None
    get_var: Optional[str] = None
    # True for SET LOCAL (setting does not survive the transaction).
    is_local: bool = False
    stmt_name: bytes = b""
    """Computed prepared statement name for this query."""
    frontend_only: bool = False
    """Whether the query is completely emulated outside of backend and so
    the response should be synthesized also."""
    command_tag: bytes = b""
    """If frontend_only is True, only issue CommandComplete with this tag."""
@dataclasses.dataclass
class ParsedDatabase:
    """Per-database state loaded/introspected by the I/O server."""
    # Pickled user schema (see QueryUnit.user_schema).
    user_schema_pickle: bytes
    schema_version: uuid.UUID
    database_config: immutables.Map[str, config.SettingValue]
    ext_config_settings: list[config.Setting]
    protocol_version: defines.ProtocolVersion
    state_serializer: sertypes.StateSerializer
# Immutable mapping of SQL setting name -> value.  A None value removes
# the setting (see SQLTransactionState.set).
SQLSettings = immutables.Map[Optional[str], Optional[str | list[str]]]
DEFAULT_SQL_SETTINGS: SQLSettings = immutables.Map()
# Frontend-emulated settings presented to SQL clients by default.
DEFAULT_SQL_FE_SETTINGS: SQLSettings = immutables.Map({
    "search_path": "public",
    "server_version": defines.PGEXT_POSTGRES_VERSION,
    "server_version_num": str(defines.PGEXT_POSTGRES_VERSION_NUM),
})
@dataclasses.dataclass
class SQLTransactionState:
    """Tracks frontend-only SQL session settings across transactions.

    Settings live in three layers: ``settings`` (committed session
    state), ``in_tx_settings`` (non-local changes made inside the
    current transaction) and ``in_tx_local_settings`` (a superset that
    additionally includes SET LOCAL changes, which must not survive
    the transaction).
    """

    in_tx: bool
    settings: SQLSettings
    in_tx_settings: Optional[SQLSettings]
    in_tx_local_settings: Optional[SQLSettings]
    # (name, session-settings, local-settings) snapshots, oldest first.
    savepoints: list[tuple[str, SQLSettings, SQLSettings]]

    def current_fe_settings(self) -> SQLSettings:
        """Return the settings effective for translating the next query."""
        if self.in_tx:
            # Bug fix: inside a transaction the *local* settings are
            # authoritative — they include both SET and SET LOCAL
            # changes, whereas in_tx_settings excludes local ones.
            return self.in_tx_local_settings or DEFAULT_SQL_FE_SETTINGS
        else:
            # Bug fix: this used to return in_tx_local_settings, which
            # is always None outside a transaction, silently discarding
            # every committed session setting.
            return self.settings or DEFAULT_SQL_FE_SETTINGS

    def get(self, name: str) -> Optional[str | list[str]]:
        """Look up *name* in the effective settings; raises KeyError."""
        if self.in_tx:
            assert self.in_tx_local_settings
            return self.in_tx_local_settings[name]
        else:
            return self.settings[name]

    def apply(self, query_unit: SQLQueryUnit) -> None:
        """Advance this state by the effects of *query_unit*."""
        if query_unit.tx_action == TxAction.COMMIT:
            self.in_tx = False
            self.settings = self.in_tx_settings  # type: ignore
            self.in_tx_settings = None
            self.in_tx_local_settings = None
            self.savepoints.clear()
        elif query_unit.tx_action == TxAction.ROLLBACK:
            self.in_tx = False
            self.in_tx_settings = None
            self.in_tx_local_settings = None
            self.savepoints.clear()
        elif query_unit.tx_action == TxAction.DECLARE_SAVEPOINT:
            self.savepoints.append((
                query_unit.sp_name,
                self.in_tx_settings,
                self.in_tx_local_settings,
            ))  # type: ignore
        elif query_unit.tx_action == TxAction.ROLLBACK_TO_SAVEPOINT:
            while self.savepoints:
                sp_name, settings, local_settings = self.savepoints[-1]
                if query_unit.sp_name == sp_name:
                    self.in_tx_settings = settings
                    self.in_tx_local_settings = local_settings
                    break
                else:
                    # Bug fix: discard savepoints declared *after* the
                    # target, i.e. pop from the end.  The previous
                    # pop(0) dropped the oldest savepoints while still
                    # inspecting the last one, so rolling back to an
                    # existing early savepoint raised "does not exist".
                    # Per SQL semantics the target savepoint itself is
                    # kept (we break before popping it).
                    self.savepoints.pop()
            else:
                raise errors.TransactionError(
                    f'savepoint "{query_unit.sp_name}" does not exist'
                )
        if not self.in_tx:
            # Always start an implicit transaction here, because in the
            # compiler, multiple apply() calls only happen in simple query,
            # and any query would start an implicit transaction. For example,
            # we need to support a single ROLLBACK without a matching BEGIN
            # rolling back an implicit transaction.
            self.in_tx = True
            self.in_tx_settings = self.settings
            self.in_tx_local_settings = self.settings
        if query_unit.frontend_only and query_unit.set_vars:
            for name, value in query_unit.set_vars.items():
                self.set(name, value, query_unit.is_local)

    def set(
        self, name: Optional[str], value: str | list[str] | None,
        is_local: bool
    ) -> None:
        """Set (or, with value=None, remove) a frontend setting."""
        def _set(attr_name: str) -> None:
            settings = getattr(self, attr_name)
            if value is None:
                if name in settings:
                    settings = settings.delete(name)
            else:
                settings = settings.set(name, value)
            setattr(self, attr_name, settings)
        if self.in_tx:
            # Local settings always receive the change; non-local ones
            # only when this is not a SET LOCAL.
            _set("in_tx_local_settings")
            if not is_local:
                _set("in_tx_settings")
        elif not is_local:
            _set("settings")
#############################
class ProposedMigrationStep(NamedTuple):
    """A single auto-generated migration step proposed to the user."""
    statements: Tuple[str, ...]
    confidence: float
    prompt: str
    prompt_id: str
    data_safe: bool
    required_user_input: tuple[dict[str, str], ...]
    # This isn't part of the output data, but is used to figure out
    # what to prohibit when something is rejected.
    operation_key: s_delta.CommandKey

    def to_json(self) -> Dict[str, Any]:
        """Render this step in the wire format expected by clients."""
        statement_objs = [{'text': text} for text in self.statements]
        return dict(
            statements=statement_objs,
            confidence=self.confidence,
            prompt=self.prompt,
            prompt_id=self.prompt_id,
            data_safe=self.data_safe,
            required_user_input=list(self.required_user_input),
        )
class MigrationState(NamedTuple):
    """State of an in-progress START MIGRATION block."""
    parent_migration: Optional[s_migrations.Migration]
    initial_schema: s_schema.Schema
    # Savepoint backing the migration block, when inside a transaction.
    initial_savepoint: Optional[str]
    target_schema: s_schema.Schema
    guidance: s_obj.DeltaGuidance
    # DDL statements accepted so far within the migration block.
    accepted_cmds: Tuple[qlast.Base, ...]
    last_proposed: Optional[Tuple[ProposedMigrationStep, ...]]
class MigrationRewriteState(NamedTuple):
    """State of an in-progress START MIGRATION REWRITE block."""
    initial_savepoint: Optional[str]
    target_schema: s_schema.Schema
    accepted_migrations: Tuple[qlast.CreateMigration, ...]
class TransactionState(NamedTuple):
    """Immutable snapshot of compiler-visible per-transaction state."""
    id: int
    # Savepoint name; None for the transaction's own states.
    name: Optional[str]
    user_schema: s_schema.FlatSchema
    global_schema: s_schema.FlatSchema
    modaliases: immutables.Map[Optional[str], str]
    session_config: immutables.Map[str, config.SettingValue]
    database_config: immutables.Map[str, config.SettingValue]
    system_config: immutables.Map[str, config.SettingValue]
    cached_reflection: immutables.Map[str, Tuple[str, ...]]
    # Back-reference to the owning Transaction.
    tx: Transaction
    migration_state: Optional[MigrationState] = None
    migration_rewrite_state: Optional[MigrationRewriteState] = None
class Transaction:
    """Compiler-side model of a single database transaction.

    The evolving schema/config state is kept as immutable
    TransactionState snapshots; savepoints are snapshots recorded both
    here and in the owning CompilerConnectionState's savepoint log.
    """
    _savepoints: Dict[int, TransactionState]
    _constate: CompilerConnectionState

    def __init__(
        self,
        constate: CompilerConnectionState,
        *,
        user_schema: s_schema.FlatSchema,
        global_schema: s_schema.FlatSchema,
        modaliases: immutables.Map[Optional[str], str],
        session_config: immutables.Map[str, config.SettingValue],
        database_config: immutables.Map[str, config.SettingValue],
        system_config: immutables.Map[str, config.SettingValue],
        cached_reflection: immutables.Map[str, Tuple[str, ...]],
        implicit: bool = True,
    ) -> None:
        assert not isinstance(user_schema, s_schema.ChainedSchema)
        self._constate = constate
        self._id = constate._new_txid()
        self._implicit = implicit
        self._current = TransactionState(
            id=self._id,
            name=None,
            user_schema=user_schema,
            global_schema=global_schema,
            modaliases=modaliases,
            session_config=session_config,
            database_config=database_config,
            system_config=system_config,
            cached_reflection=cached_reflection,
            tx=self,
        )
        # State at transaction start; used by the *_if_updated getters.
        self._state0 = self._current
        self._savepoints = {}

    @property
    def id(self) -> int:
        return self._id

    def is_implicit(self) -> bool:
        return self._implicit

    def make_explicit(self) -> None:
        """Promote an implicit transaction to an explicit one."""
        if self._implicit:
            self._implicit = False
        else:
            raise errors.TransactionError('already in explicit transaction')

    def declare_savepoint(self, name: str) -> int:
        """Declare a named savepoint; only valid in explicit transactions."""
        if self.is_implicit():
            raise errors.TransactionError(
                'savepoints can only be used in transaction blocks')
        return self._declare_savepoint(name)

    def start_migration(self) -> str:
        """Open a migration block backed by an anonymous savepoint."""
        name = str(uuid.uuid4())
        self._declare_savepoint(name)
        return name

    def _declare_savepoint(self, name: str) -> int:
        # Snapshot the current state under a fresh id, both locally
        # and in the connection-wide savepoint log.
        sp_id = self._constate._new_txid()
        sp_state = self._current._replace(id=sp_id, name=name)
        self._savepoints[sp_id] = sp_state
        self._constate._savepoints_log[sp_id] = sp_state
        return sp_id

    def rollback_to_savepoint(self, name: str) -> TransactionState:
        """Restore the state of savepoint *name* (explicit tx only)."""
        if self.is_implicit():
            raise errors.TransactionError(
                'savepoints can only be used in transaction blocks')
        return self._rollback_to_savepoint(name)

    def abort_migration(self, name: str) -> None:
        self._rollback_to_savepoint(name)

    def _rollback_to_savepoint(self, name: str) -> TransactionState:
        # Walk savepoints newest-first, discarding those declared after
        # the target.  NOTE: erased ids stay in the connection-wide
        # _savepoints_log; that log is pruned in sync_to_savepoint.
        sp_ids_to_erase = []
        for sp in reversed(self._savepoints.values()):
            if sp.name == name:
                self._current = sp
                break
            sp_ids_to_erase.append(sp.id)
        else:
            raise errors.TransactionError(f'there is no {name!r} savepoint')
        for sp_id in sp_ids_to_erase:
            self._savepoints.pop(sp_id)
        return sp

    def release_savepoint(self, name: str) -> None:
        """Release savepoint *name* (explicit tx only)."""
        if self.is_implicit():
            raise errors.TransactionError(
                'savepoints can only be used in transaction blocks')
        self._release_savepoint(name)

    def commit_migration(self, name: str) -> None:
        self._release_savepoint(name)

    def _release_savepoint(self, name: str) -> None:
        # Erase the named savepoint and every savepoint declared after it.
        sp_ids_to_erase = []
        for sp in reversed(self._savepoints.values()):
            sp_ids_to_erase.append(sp.id)
            if sp.name == name:
                break
        else:
            raise errors.TransactionError(f'there is no {name!r} savepoint')
        for sp_id in sp_ids_to_erase:
            self._savepoints.pop(sp_id)

    def get_schema(self, std_schema: s_schema.FlatSchema) -> s_schema.Schema:
        """Return the full schema: std + current user + global schemas."""
        assert isinstance(std_schema, s_schema.FlatSchema)
        return s_schema.ChainedSchema(
            std_schema,
            self._current.user_schema,
            self._current.global_schema,
        )

    def get_user_schema(self) -> s_schema.FlatSchema:
        return self._current.user_schema

    def get_user_schema_if_updated(self) -> Optional[s_schema.FlatSchema]:
        # Identity comparison: update_schema installs new schema objects.
        if self._current.user_schema is self._state0.user_schema:
            return None
        else:
            return self._current.user_schema

    def get_global_schema(self) -> s_schema.FlatSchema:
        return self._current.global_schema

    def get_global_schema_if_updated(self) -> Optional[s_schema.FlatSchema]:
        if self._current.global_schema is self._state0.global_schema:
            return None
        else:
            return self._current.global_schema

    def get_modaliases(self) -> immutables.Map[Optional[str], str]:
        return self._current.modaliases

    def get_session_config(self) -> immutables.Map[str, config.SettingValue]:
        return self._current.session_config

    def get_database_config(self) -> immutables.Map[str, config.SettingValue]:
        return self._current.database_config

    def get_system_config(self) -> immutables.Map[str, config.SettingValue]:
        return self._current.system_config

    def get_cached_reflection_if_updated(self) -> Optional[
        immutables.Map[str, Tuple[str, ...]]
    ]:
        # Unlike the schema getters above, this compares by equality.
        if self._current.cached_reflection == self._state0.cached_reflection:
            return None
        else:
            return self._current.cached_reflection

    def get_cached_reflection(self) -> immutables.Map[str, Tuple[str, ...]]:
        return self._current.cached_reflection

    def get_migration_state(self) -> Optional[MigrationState]:
        return self._current.migration_state

    def get_migration_rewrite_state(self) -> Optional[MigrationRewriteState]:
        return self._current.migration_rewrite_state

    def update_schema(self, new_schema: s_schema.Schema) -> None:
        """Install the user/global schemas extracted from *new_schema*."""
        assert isinstance(new_schema, s_schema.ChainedSchema)
        user_schema = new_schema.get_top_schema()
        assert isinstance(user_schema, s_schema.FlatSchema)
        global_schema = new_schema.get_global_schema()
        assert isinstance(global_schema, s_schema.FlatSchema)
        self._current = self._current._replace(
            user_schema=user_schema,
            global_schema=global_schema,
        )

    def update_modaliases(
        self, new_modaliases: immutables.Map[Optional[str], str]
    ) -> None:
        self._current = self._current._replace(modaliases=new_modaliases)

    def update_session_config(
        self, new_config: immutables.Map[str, config.SettingValue]
    ) -> None:
        self._current = self._current._replace(session_config=new_config)

    def update_database_config(
        self, new_config: immutables.Map[str, config.SettingValue]
    ) -> None:
        self._current = self._current._replace(database_config=new_config)

    def update_cached_reflection(
        self,
        new: immutables.Map[str, Tuple[str, ...]],
    ) -> None:
        self._current = self._current._replace(cached_reflection=new)

    def update_migration_state(
        self, mstate: Optional[MigrationState]
    ) -> None:
        self._current = self._current._replace(migration_state=mstate)

    def update_migration_rewrite_state(
        self, mrstate: Optional[MigrationRewriteState]
    ) -> None:
        self._current = self._current._replace(migration_rewrite_state=mrstate)
class CompilerConnectionState:
__slots__ = ('_savepoints_log', '_current_tx', '_tx_count',)
_savepoints_log: Dict[int, TransactionState]
def __init__(
    self,
    *,
    user_schema: s_schema.Schema,
    global_schema: s_schema.Schema,
    modaliases: immutables.Map[Optional[str], str],
    session_config: immutables.Map[str, config.SettingValue],
    database_config: immutables.Map[str, config.SettingValue],
    system_config: immutables.Map[str, config.SettingValue],
    cached_reflection: immutables.Map[str, Tuple[str, ...]],
):
    """Initialize connection state with an implicit current transaction."""
    # Seed the txid counter from a monotonic clock so that ids keep
    # increasing even across re-created state objects.
    self._tx_count = time.monotonic_ns()
    self._init_current_tx(
        user_schema=user_schema,
        global_schema=global_schema,
        modaliases=modaliases,
        session_config=session_config,
        database_config=database_config,
        system_config=system_config,
        cached_reflection=cached_reflection,
    )
    self._savepoints_log = {}
def _new_txid(self) -> int:
    """Return a fresh, strictly increasing transaction/savepoint id."""
    self._tx_count += 1
    return self._tx_count
def _init_current_tx(
    self,
    *,
    user_schema: s_schema.Schema,
    global_schema: s_schema.Schema,
    modaliases: immutables.Map[Optional[str], str],
    session_config: immutables.Map[str, config.SettingValue],
    database_config: immutables.Map[str, config.SettingValue],
    system_config: immutables.Map[str, config.SettingValue],
    cached_reflection: immutables.Map[str, Tuple[str, ...]],
) -> None:
    """Replace the current transaction with a fresh implicit one."""
    assert isinstance(user_schema, s_schema.FlatSchema)
    assert isinstance(global_schema, s_schema.FlatSchema)
    self._current_tx = Transaction(
        self,
        user_schema=user_schema,
        global_schema=global_schema,
        modaliases=modaliases,
        session_config=session_config,
        database_config=database_config,
        system_config=system_config,
        cached_reflection=cached_reflection,
    )
def can_sync_to_savepoint(self, spid: int) -> bool:
    """True if savepoint *spid* is known to this connection state."""
    return spid in self._savepoints_log
def sync_to_savepoint(self, spid: int) -> None:
    """Synchronize the compiler state with the current DB state."""
    if not self.can_sync_to_savepoint(spid):
        raise RuntimeError(f'failed to lookup savepoint with id={spid}')
    sp = self._savepoints_log[spid]
    # Restore the transaction that owned the savepoint and rewind its
    # current state (and id) to the snapshot.
    self._current_tx = sp.tx
    self._current_tx._current = sp
    self._current_tx._id = spid
    # Cleanup all savepoints declared after the one we rolled back to
    # in the transaction we have now set as current.
    # (Ids are monotonically increasing, so `> spid` means "later".)
    for id in tuple(self._current_tx._savepoints):
        if id > spid:
            self._current_tx._savepoints.pop(id)
    # Cleanup all savepoints declared after the one we rolled back to
    # in the global savepoints log.
    for id in tuple(self._savepoints_log):
        if id > spid:
            self._savepoints_log.pop(id)
def current_tx(self) -> Transaction:
    """Return the transaction the compiler is currently operating in."""
    return self._current_tx
def start_tx(self) -> None:
    """Start an explicit transaction by promoting the implicit one."""
    if self._current_tx.is_implicit():
        self._current_tx.make_explicit()
    else:
        raise errors.TransactionError('already in transaction')